Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Some patches for pithos, deploy, and burnin (resend) #377

Open
wants to merge 11 commits into
base: develop
Choose a base branch
from
3 changes: 3 additions & 0 deletions snf-deploy/conf/nodes.conf
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,9 @@
# Instances will reside in the .vm.<domain> subdomain
domain = synnefo.live

# This is the default forwarder to be used by bind
nameserver = 8.8.8.8

# Each node should define:

# The node's desired hostname. It will be set
Expand Down
6 changes: 3 additions & 3 deletions snf-deploy/files/etc/bind/named.conf.options
Original file line number Diff line number Diff line change
Expand Up @@ -10,9 +10,9 @@ options {
// Uncomment the following block, and insert the addresses replacing
// the all-0's placeholder.

// forwarders {
// 0.0.0.0;
// };
forwarders {
%NAMESERVERS%;
};

auth-nxdomain no; # conform to RFC1035
allow-recursion { %NODE_IPS%; };
Expand Down
29 changes: 23 additions & 6 deletions snf-deploy/snfdeploy/components.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# Copyright (C) 2010-2015 GRNET S.A. and individual contributors
# Copyright (C) 2010-2016 GRNET S.A. and individual contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
Expand Down Expand Up @@ -192,13 +192,19 @@ def test(self):
class SSH(base.Component):
@base.run_cmds
def prepare(self):
    """Return the shell commands that prepare root's SSH setup.

    Yields an empty command list (a no-op) when key injection has been
    disabled in the deployment configuration.
    """
    if not config.key_inject:
        return []

    commands = [
        # Make sure the target directory exists before touching any keys.
        "mkdir -p /root/.ssh",
        # Keep a .bak copy of every pre-existing file under /root/.ssh.
        "for f in $(ls /root/.ssh/*); do cp $f $f.bak ; done",
        # Disable host-key prompts for non-interactive deployment runs.
        "echo StrictHostKeyChecking no >> /etc/ssh/ssh_config",
    ]
    return commands

def _configure(self):
if not config.key_inject:
return []

files = [
"authorized_keys", "id_dsa", "id_dsa.pub", "id_rsa", "id_rsa.pub"
]
Expand All @@ -207,6 +213,9 @@ def _configure(self):

@base.run_cmds
def initialize(self):
if not config.key_inject:
return []

f = "/root/.ssh/authorized_keys"
return [
"test -e {0}.bak && cat {0}.bak >> {0} || true".format(f)
Expand Down Expand Up @@ -324,6 +333,10 @@ def prepare(self):
def _configure(self):
d = self.node.domain
ip = self.node.ip
r1 = {
"node_ips": ";".join(self.ctx.all_ips),
"nameservers": ";".join(self.ctx.all_nameservers),
}
return [
("/etc/bind/named.conf.local", {"domain": d}, {}),
("/etc/bind/zones/example.com",
Expand All @@ -334,8 +347,7 @@ def _configure(self):
{"remote": "/etc/bind/zones/vm.%s" % d}),
("/etc/bind/rev/synnefo.in-addr.arpa.zone", {"domain": d}, {}),
("/etc/bind/rev/synnefo.ip6.arpa.zone", {"domain": d}, {}),
("/etc/bind/named.conf.options",
{"node_ips": ";".join(self.ctx.all_ips)}, {}),
("/etc/bind/named.conf.options", r1, {}),
("/root/ddns/ddns.key", {}, {"remote": "/etc/bind/ddns.key"}),
]

Expand Down Expand Up @@ -518,7 +530,9 @@ def initialize(self):
# If extra disk found use it
# else create a raw file and losetup it
cmd = """
if [ -b "{0}" ]; then
if vgs {2}; then
echo "VG ${2} found!"
elif [ -b "{0}" ]; then
pvcreate {0} && vgcreate {2} {0}
else
truncate -s {3} {1}
Expand Down Expand Up @@ -762,9 +776,12 @@ def initialize(self):
--ipolicy-std-specs {2} \
--ipolicy-bounds-specs min:{3}/max:{4} \
--enabled-disk-templates file,ext \
{5}
--file-storage-dir {5}/ganeti/file-storage \
--shared-file-storage-dir {5}/ganeti/shared-file-storage \
{6}
""".format(config.common_bridge, self.cluster.netdev,
std, bound_min, bound_max, self.cluster.fqdn)
std, bound_min, bound_max, config.shared_dir,
self.cluster.fqdn)

modify = "gnt-node modify --vm-capable=no %s" % self.node.fqdn

Expand Down
3 changes: 2 additions & 1 deletion snf-deploy/snfdeploy/config.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# Copyright (C) 2010-2014 GRNET S.A.
# Copyright (C) 2010-2016 GRNET S.A. and individual contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
Expand Down Expand Up @@ -136,6 +136,7 @@ def init(args):
config.dry_run = args.dry_run
config.force = args.force
config.ssh_key = args.ssh_key
config.key_inject = args.key_inject
config.mem = args.mem
config.vnc = args.vnc
config.smp = args.smp
Expand Down
7 changes: 6 additions & 1 deletion snf-deploy/snfdeploy/context.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# Copyright (C) 2010-2014 GRNET S.A.
# Copyright (C) 2010-2016 GRNET S.A. and individual contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
Expand Down Expand Up @@ -119,6 +119,11 @@ def all_ips(self):
l = lambda x: config.get_node_info(x).ip
return [l(n) for n in self.all_nodes]

@property
def all_nameservers(self):
    """Unique nameserver (forwarder) addresses across all nodes.

    Returns the de-duplicated nameservers in sorted order so that
    anything rendered from them (e.g. the %NAMESERVERS% substitution
    in named.conf.options via ";".join(...)) is deterministic across
    runs; a raw set's iteration order varies with hash randomization.
    """
    nameservers = {
        config.get_node_info(node).nameserver for node in self.all_nodes
    }
    return sorted(nameservers)

def get(self, role):
try:
return config.get(self.setup, role)
Expand Down
7 changes: 7 additions & 0 deletions snf-pithos-app/conf/20-snf-pithos-app-settings.conf
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,13 @@
#
# The maximum interval (in seconds) for consequent backend object map checks
#PITHOS_BACKEND_MAP_CHECK_INTERVAL = 1
#
# Enable deletion of mapfiles after deleting a version of some object.
# This option is *unsafe* for installations prior to Synnefo version
# 0.16rc1 (commit 13d49ad) which may still include Merkle hashes and not
# Archipelago mapfiles in the Pithos database.
#PITHOS_BACKEND_PURGE_MAPFILES = False
#
# The archipelago mapfile prefix (it should not exceed 15 characters)
# WARNING: Once set it should not be changed
#PITHOS_BACKEND_MAPFILE_PREFIX='snf_file_'
Expand Down
4 changes: 4 additions & 0 deletions snf-pithos-app/pithos/api/settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -185,6 +185,10 @@ def __str__(self):
BACKEND_MAP_CHECK_INTERVAL = getattr(settings,
'PITHOS_BACKEND_MAP_CHECK_INTERVAL', 5)

# Whether to delete mapfiles or not
BACKEND_PURGE_MAPFILES = getattr(settings, 'PITHOS_BACKEND_PURGE_MAPFILES',
False)

# The archipelago mapfile prefix (it should not exceed 15 characters)
# WARNING: Once set it should not be changed
BACKEND_MAPFILE_PREFIX = getattr(settings,
Expand Down
2 changes: 2 additions & 0 deletions snf-pithos-app/pithos/api/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@

from pithos.api.settings import (BACKEND_DB_MODULE, BACKEND_DB_CONNECTION,
BACKEND_BLOCK_MODULE, BACKEND_BLOCK_KWARGS,
BACKEND_PURGE_MAPFILES,
ASTAKOSCLIENT_POOLSIZE,
SERVICE_TOKEN,
ASTAKOS_AUTH_URL,
Expand Down Expand Up @@ -1008,6 +1009,7 @@ def simple_list_response(request, l):
service_token=SERVICE_TOKEN,
astakosclient_poolsize=ASTAKOSCLIENT_POOLSIZE,
free_versioning=BACKEND_FREE_VERSIONING,
purge_mapfiles=BACKEND_PURGE_MAPFILES,
block_params=BACKEND_BLOCK_KWARGS,
public_url_security=PUBLIC_URL_SECURITY,
public_url_alphabet=PUBLIC_URL_ALPHABET,
Expand Down
51 changes: 31 additions & 20 deletions snf-pithos-backend/pithos/backends/lib/sqlalchemy/node.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# Copyright (C) 2010-2014 GRNET S.A.
# Copyright (C) 2010-2016 GRNET S.A. and individual contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
Expand Down Expand Up @@ -369,10 +369,12 @@ def node_count_children(self, node):

def node_purge_children(self, parent, before=inf, cluster=0,
update_statistics_ancestors_depth=None):
"""Delete all versions with the specified
parent and cluster, and return
the hashes, the total size and the serials of versions deleted.
Clears out nodes with no remaining versions.
"""Delete all versions with the specified parent and cluster.

Returns the hashes, the total size, serials and the names of the
mapfiles of the versions that have been deleted. Clears out nodes with
no remaining versions.

"""
#update statistics
c1 = select([self.nodes.c.node],
Expand All @@ -389,21 +391,24 @@ def node_purge_children(self, parent, before=inf, cluster=0,
row = r.fetchone()
r.close()
if not row:
return (), 0, ()
return (), 0, (), ()
nr, size = row[0], row[1] if row[1] else 0
mtime = time()
self.statistics_update(parent, -nr, -size, mtime, cluster)
self.statistics_update_ancestors(parent, -nr, -size, mtime, cluster,
update_statistics_ancestors_depth)

s = select([self.versions.c.hash, self.versions.c.serial])
s = select([self.versions.c.hash, self.versions.c.serial,
self.versions.c.mapfile])
s = s.where(where_clause)
r = self.conn.execute(s)
hashes = []
serials = []
mapfiles = []
for row in r.fetchall():
hashes += [row[0]]
serials += [row[1]]
hashes.append(row[0])
serials.append(row[1])
mapfiles.append(row[2])
r.close()

#delete versions
Expand All @@ -424,14 +429,16 @@ def node_purge_children(self, parent, before=inf, cluster=0,
s = self.nodes.delete().where(self.nodes.c.node.in_(nodes))
self.conn.execute(s).close()

return hashes, size, serials
return hashes, size, serials, mapfiles

def node_purge(self, node, before=inf, cluster=0,
update_statistics_ancestors_depth=None):
"""Delete all versions with the specified
node and cluster, and return
the hashes and size of versions deleted.
Clears out the node if it has no remaining versions.
"""Delete all versions with the specified node and cluster.

Returns the hashes, size and the name of the mapfiles of the versions
that have been deleted. Clears out the node if it has no remaining
versions.

"""

#update statistics
Expand All @@ -448,19 +455,22 @@ def node_purge(self, node, before=inf, cluster=0,
nr, size = row[0], row[1]
r.close()
if not nr:
return (), 0, ()
return (), 0, (), ()
mtime = time()
self.statistics_update_ancestors(node, -nr, -size, mtime, cluster,
update_statistics_ancestors_depth)

s = select([self.versions.c.hash, self.versions.c.serial])
s = select([self.versions.c.hash, self.versions.c.serial,
self.versions.c.mapfile])
s = s.where(where_clause)
r = self.conn.execute(s)
hashes = []
serials = []
mapfiles = []
for row in r.fetchall():
hashes += [row[0]]
serials += [row[1]]
hashes.append(row[0])
serials.append(row[1])
mapfiles.append(row[2])
r.close()

#delete versions
Expand All @@ -481,7 +491,7 @@ def node_purge(self, node, before=inf, cluster=0,
s = self.nodes.delete().where(self.nodes.c.node.in_(nodes))
self.conn.execute(s).close()

return hashes, size, serials
return hashes, size, serials, mapfiles

def node_remove(self, node, update_statistics_ancestors_depth=None):
"""Remove the node specified.
Expand Down Expand Up @@ -1028,6 +1038,7 @@ def version_remove(self, serial, update_statistics_ancestors_depth=None):
hash = props.hash
size = props.size
cluster = props.cluster
mapfile = props.mapfile

mtime = time()
self.statistics_update_ancestors(node, -1, -size, mtime, cluster,
Expand All @@ -1040,7 +1051,7 @@ def version_remove(self, serial, update_statistics_ancestors_depth=None):
if props:
self.nodes_set_latest_version(node, serial)

return hash, size
return hash, size, mapfile

def attribute_get_domains(self, serial, node=None):
node = node or select([self.versions.c.node],
Expand Down
Loading