diff --git a/Makefile.am b/Makefile.am
index 4cd7ff954389af476fa86ccf8fa24d5e03001d36..a9ef001c3957643d1ce0d9313890737a74a2fd56 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -294,6 +294,7 @@ docrst = \
	doc/design-multi-reloc.rst \
	doc/design-network.rst \
	doc/design-chained-jobs.rst \
+	doc/design-ovf-support.rst \
	doc/cluster-merge.rst \
	doc/design-shared-storage.rst \
	doc/devnotes.rst \
diff --git a/NEWS b/NEWS
index be7159578db3ffe382ba5570005b65bedc337e86..a6689a3ef173d835028c499f5f240bcd4eeb1a56 100644
--- a/NEWS
+++ b/NEWS
@@ -1,10 +1,10 @@
 News
 ====
 
-Version 2.5.0 beta1
+Version 2.5.0 beta2
 -------------------
 
-*(Released Fri, 12 Aug 2011)*
+*(Released Mon, 22 Aug 2011)*
 
 Incompatible/important changes and bugfixes
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -84,6 +84,7 @@ New features
 - ``gnt-instance info`` defaults to static information if primary node is
   offline.
 - Opcodes have a new ``comment`` attribute.
+- Added basic SPICE support to KVM hypervisor.
 
 Node group improvements
 ~~~~~~~~~~~~~~~~~~~~~~~
@@ -127,6 +128,14 @@ Misc
 
 - DRBD metadata volumes are overwritten with zeros during disk creation.
 
 
+Version 2.5.0 beta1
+-------------------
+
+*(Released Fri, 12 Aug 2011)*
+
+This was the first beta release of the 2.5 series.
+
+
 Version 2.4.3
 -------------
 
diff --git a/autotools/check-news b/autotools/check-news
index 879400c091e680ef85c0169d463bccd8270f06f3..51a0403acc7a5bc6574352ae40c1fbeccdcc5e89 100755
--- a/autotools/check-news
+++ b/autotools/check-news
@@ -51,7 +51,7 @@ def main():
 
     if DASHES_RE.match(line):
       if not prevline.startswith("Version "):
-        raise Exception("Line %s: Invalid title" % (fileinput.lineno() - 1))
+        raise Exception("Line %s: Invalid title" % (fileinput.filelineno() - 1))
       expect_date = True
 
     elif expect_date:
@@ -66,7 +66,8 @@ def main():
       m = RELEASED_RE.match(line)
       if not m:
-        raise Exception("Line %s: Invalid release line" % fileinput.lineno())
+        raise Exception("Line %s: Invalid release line" %
+                        fileinput.filelineno())
 
       # Including the weekday in the date string does not work as time.strptime
       # would return an inconsistent result if the weekday is incorrect.
@@ -77,7 +78,8 @@ def main():
       # Check weekday
       if m.group("day") != weekday:
         raise Exception("Line %s: %s was/is a %s, not %s" %
-                        (fileinput.lineno(), parsed, weekday, m.group("day")))
+                        (fileinput.filelineno(), parsed, weekday,
+                         m.group("day")))
 
       expect_date = False
 
diff --git a/configure.ac b/configure.ac
index 601adfce8661144114ceaf2d8548c425171831cd..25491a1651201551e73ca1aa5a40e8a57a1a47bc 100644
--- a/configure.ac
+++ b/configure.ac
@@ -2,7 +2,7 @@ m4_define([gnt_version_major], [2])
 m4_define([gnt_version_minor], [5])
 m4_define([gnt_version_revision], [0])
-m4_define([gnt_version_suffix], [~beta1])
+m4_define([gnt_version_suffix], [~beta2])
 m4_define([gnt_version_full],
           m4_format([%d.%d.%d%s],
                     gnt_version_major,
                     gnt_version_minor,
diff --git a/daemons/import-export b/daemons/import-export
index 428f5a6f6392f1e833e0a8e8149220cb68c2862a..a63e4c24c55c93388210adad05ed75f7192a3f83 100755
--- a/daemons/import-export
+++ b/daemons/import-export
@@ -128,7 +128,7 @@ class StatusFile:
     @param port: TCP/UDP port
 
     """
-    assert isinstance(port, (int, long)) and 0 < port < 2**16
+    assert isinstance(port, (int, long)) and 0 < port < (2 ** 16)
     self._data.listen_port = port
 
   def GetListenPort(self):
diff --git a/lib/asyncnotifier.py b/lib/asyncnotifier.py
index 9d5610ee78df04fb6dd877a44db3167091881c92..6f74542a989d39244acff5a1b139d50da9b380d2 100644
--- a/lib/asyncnotifier.py
+++ b/lib/asyncnotifier.py
@@ -34,6 +34,7 @@ except ImportError:
 from ganeti import daemon
 from ganeti import errors
 
+
 # We contributed the AsyncNotifier class back to python-pyinotify, and it's
 # part of their codebase since version 0.8.7. This code can be removed once
 # we'll be ready to depend on python-pyinotify >= 0.8.7
diff --git a/lib/backend.py b/lib/backend.py
index 953b516ccd009d567b861beddb912c919090314f..84a39d07de6a1e6dff287d54108046e4eafbf3a6 100644
--- a/lib/backend.py
+++ b/lib/backend.py
@@ -730,7 +730,7 @@ def GetVolumeList(vg_names):
       # we don't want to report such volumes as existing, since they
       # don't really hold data
       continue
-    lvs[vg_name+"/"+name] = (size, inactive, online)
+    lvs[vg_name + "/" + name] = (size, inactive, online)
 
   return lvs
 
@@ -2359,7 +2359,7 @@ def FinalizeExport(instance, snap_disks):
       config.set(constants.INISECT_INS, "nic%d_%s" % (nic_count, param),
                  "%s" % nic.nicparams.get(param, None))
   # TODO: redundant: on load can read nics until it doesn't exist
-  config.set(constants.INISECT_INS, "nic_count" , "%d" % nic_total)
+  config.set(constants.INISECT_INS, "nic_count", "%d" % nic_total)
 
   disk_total = 0
   for disk_count, disk in enumerate(snap_disks):
@@ -2372,7 +2372,7 @@ def FinalizeExport(instance, snap_disks):
       config.set(constants.INISECT_INS, "disk%d_size" % disk_count,
                  ("%d" % disk.size))
 
-  config.set(constants.INISECT_INS, "disk_count" , "%d" % disk_total)
+  config.set(constants.INISECT_INS, "disk_count", "%d" % disk_total)
 
   # New-style hypervisor/backend parameters
 
@@ -3378,7 +3378,6 @@ class HooksRunner(object):
     else:
       _Fail("Unknown hooks phase '%s'", phase)
 
-
     subdir = "%s-%s.d" % (hpath, suffix)
     dir_name = utils.PathJoin(self._BASE_DIR, subdir)
diff --git a/lib/bdev.py b/lib/bdev.py
index 5cf45c9a869b828acc568b6fd11b8c2127c7d832..905ad1fc13b7f6490977776a986e6dd5d9ac95e2 100644
--- a/lib/bdev.py
+++ b/lib/bdev.py
@@ -321,7 +321,6 @@ class BlockDev(object):
                                   is_degraded=is_degraded,
                                   ldisk_status=ldisk_status)
 
-
   def SetInfo(self, text):
    """Update metadata with info text.
@@ -408,12 +407,12 @@ class LogicalVolume(BlockDev):
     pvs_info.sort()
     pvs_info.reverse()
 
-    pvlist = [ pv[1] for pv in pvs_info ]
+    pvlist = [pv[1] for pv in pvs_info]
     if compat.any(":" in v for v in pvlist):
       _ThrowError("Some of your PVs have the invalid character ':' in their"
                   " name, this is not supported - please filter them out"
                   " in lvm.conf using either 'filter' or 'preferred_names'")
-    free_size = sum([ pv[0] for pv in pvs_info ])
+    free_size = sum([pv[0] for pv in pvs_info])
     current_pvs = len(pvlist)
     stripes = min(current_pvs, constants.LVM_STRIPECOUNT)
 
@@ -1113,7 +1112,7 @@ class DRBD8(BaseDRBD):
     super(DRBD8, self).__init__(unique_id, children, size)
     self.major = self._DRBD_MAJOR
     version = self._GetVersion(self._GetProcData())
-    if version["k_major"] != 8 :
+    if version["k_major"] != 8:
       _ThrowError("Mismatch in DRBD kernel version and requested ganeti"
                   " usage: kernel is %s.%s, ganeti wants 8.x",
                   version["k_major"], version["k_minor"])
@@ -1194,7 +1193,7 @@ class DRBD8(BaseDRBD):
 
     # this also converts the value to an int
     number = pyp.Word(pyp.nums).setParseAction(lambda s, l, t: int(t[0]))
-    comment = pyp.Literal ("#") + pyp.Optional(pyp.restOfLine)
+    comment = pyp.Literal("#") + pyp.Optional(pyp.restOfLine)
     defa = pyp.Literal("_is_default").suppress()
     dbl_quote = pyp.Literal('"').suppress()
diff --git a/lib/bootstrap.py b/lib/bootstrap.py
index 4bc9ebf60d5b3c43afcdf0b001b3270188cd2603..3e75dfa124e2b18e4cd92e651e374d4dd8d5711f 100644
--- a/lib/bootstrap.py
+++ b/lib/bootstrap.py
@@ -270,7 +270,6 @@ def InitCluster(cluster_name, mac_prefix, # pylint: disable-msg=R0913
                                " entries: %s" % invalid_hvs,
                                errors.ECODE_INVAL)
 
-
   ipcls = None
   if primary_ip_version == constants.IP4_VERSION:
     ipcls = netutils.IP4Address
@@ -661,6 +660,7 @@ def MasterFailover(no_voting=False):
   master_ip = sstore.GetMasterIP()
   total_timeout = 30
 
+  # Here we have a phase where no master should be running
   def _check_ip():
     if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT):
diff --git a/lib/build/sphinx_ext.py b/lib/build/sphinx_ext.py
index 2abfe6e0d81b25e11e87267a212ff998f9c8ad4b..474721dccdeec46626972b832f0a9d76c48ce2d5 100644
--- a/lib/build/sphinx_ext.py
+++ b/lib/build/sphinx_ext.py
@@ -32,6 +32,8 @@ import docutils.utils
 import sphinx.errors
 import sphinx.util.compat
 
+s_compat = sphinx.util.compat
+
 from ganeti import constants
 from ganeti import compat
 from ganeti import errors
@@ -147,7 +149,7 @@ def _BuildOpcodeResult(op_id):
   return "``%s``" % result_fn
 
 
-class OpcodeParams(sphinx.util.compat.Directive):
+class OpcodeParams(s_compat.Directive):
   """Custom directive for opcode parameters.
 
   See also <http://docutils.sourceforge.net/docs/howto/rst-directives.html>.
@@ -178,7 +180,7 @@ class OpcodeParams(sphinx.util.compat.Directive):
     return []
 
 
-class OpcodeResult(sphinx.util.compat.Directive):
+class OpcodeResult(s_compat.Directive):
   """Custom directive for opcode result.
 
   See also <http://docutils.sourceforge.net/docs/howto/rst-directives.html>.
@@ -230,7 +232,7 @@ def PythonEvalRole(role, rawtext, text, lineno, inliner,
   return ([node], [])
 
 
-class PythonAssert(sphinx.util.compat.Directive):
+class PythonAssert(s_compat.Directive):
   """Custom directive for writing assertions.
 
   The content must be a valid Python expression. If its result does not
diff --git a/lib/cli.py b/lib/cli.py
index 4aeb16216f0a3695fcc5a6fae6848432c1937cab..f55804d04bd6809e41af4f5079e96d70351a1ea1 100644
--- a/lib/cli.py
+++ b/lib/cli.py
@@ -728,9 +728,9 @@ BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                          type="keyval", default={},
                          help="Backend parameters")
 
-HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval", 
-                        default={}, dest="hvparams", 
-                        help="Hypervisor parameters") 
+HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
+                        default={}, dest="hvparams",
+                        help="Hypervisor parameters")
 
 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                             help="Hypervisor and hypervisor options, in the"
@@ -2815,7 +2815,7 @@ def FormatTimestamp(ts):
   @return: a string with the formatted timestamp
 
   """
-  if not isinstance (ts, (tuple, list)) or len(ts) != 2:
+  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
     return "?"
   sec, usec = ts
   return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
diff --git a/lib/client/gnt_debug.py b/lib/client/gnt_debug.py
index ecb3fedf85e7a66cb61eaacdd40691b63078caac..0a73525cf14f0c68d333cffd8f07df5e4064c8a4 100644
--- a/lib/client/gnt_debug.py
+++ b/lib/client/gnt_debug.py
@@ -113,9 +113,9 @@ def GenericOpCodes(opts, args):
   t3 = time.time()
   ToStdout("C:op %4d" % op_cnt)
   ToStdout("C:job %4d" % job_cnt)
-  ToStdout("T:submit %4.4f" % (t2-t1))
-  ToStdout("T:exec %4.4f" % (t3-t2))
-  ToStdout("T:total %4.4f" % (t3-t1))
+  ToStdout("T:submit %4.4f" % (t2 - t1))
+  ToStdout("T:exec %4.4f" % (t3 - t2))
+  ToStdout("T:total %4.4f" % (t3 - t1))
   return 0
 
 
@@ -689,5 +689,6 @@ aliases = {
   "allocator": "iallocator",
   }
 
+
 def Main():
   return GenericMain(commands, aliases=aliases)
diff --git a/lib/client/gnt_instance.py b/lib/client/gnt_instance.py
index 647bb9475d9ae580a09557c913818d3d6444bb20..7f51327a57ee370cfbeff0ad017cfa4d50f351d3 100644
--- a/lib/client/gnt_instance.py
+++ b/lib/client/gnt_instance.py
@@ -1119,13 +1119,13 @@ def _FormatList(buf, data, indent_level):
                   if isinstance(elem, tuple)] or [0])
   for elem in data:
     if isinstance(elem, basestring):
-      buf.write("%*s%s\n" % (2*indent_level, "", elem))
+      buf.write("%*s%s\n" % (2 * indent_level, "", elem))
     elif isinstance(elem, tuple):
       key, value = elem
       spacer = "%*s" % (max_tlen - len(key), "")
-      buf.write("%*s%s:%s %s\n" % (2*indent_level, "", key, spacer, value))
+      buf.write("%*s%s:%s %s\n" % (2 * indent_level, "", key, spacer, value))
     elif isinstance(elem, list):
-      _FormatList(buf, elem, indent_level+1)
+      _FormatList(buf, elem, indent_level + 1)
 
 
 def ShowInstanceConfig(opts, args):
@@ -1179,7 +1179,7 @@ def ShowInstanceConfig(opts, args):
     buf.write("  Operating system: %s\n" % instance["os"])
     FormatParameterDict(buf, instance["os_instance"], instance["os_actual"],
                         level=2)
-    if instance.has_key("network_port"):
+    if "network_port" in instance:
       buf.write("  Allocated network port: %s\n" %
                 compat.TryToRoman(instance["network_port"],
                                   convert=opts.roman_integers))
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 76282c87e2734de0a3f5d8cd50644ca97530936e..a57a0d9b02e90da8425dc39a5ecf6cebfd0c7cac 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -4085,6 +4085,7 @@ class LUOobCommand(NoHooksLU):
       raise errors.OpExecError("Check of out-of-band payload failed due to %s"
                                % utils.CommaJoin(errs))
 
+
 class _OsQuery(_QueryBase):
   FIELDS = query.OS_FIELDS
 
@@ -7950,7 +7951,7 @@ def _ComputeDiskSizePerVG(disk_template, disks):
 
   if disk_template not in req_size_dict:
     raise errors.ProgrammerError("Disk template '%s' size requirement"
-                               " is unknown" % disk_template)
+                                 " is unknown" % disk_template)
 
   return req_size_dict[disk_template]
 
@@ -7972,7 +7973,7 @@ def _ComputeDiskSize(disk_template, disks):
 
   if disk_template not in req_size_dict:
     raise errors.ProgrammerError("Disk template '%s' size requirement"
-                               " is unknown" % disk_template)
+                                 " is unknown" % disk_template)
 
   return req_size_dict[disk_template]
 
@@ -8252,8 +8253,8 @@ class LUInstanceCreate(LogicalUnit):
         self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
         self.op.src_node = None
         if os.path.isabs(src_path):
-          raise errors.OpPrereqError("Importing an instance from an absolute"
-                                     " path requires a source node option",
+          raise errors.OpPrereqError("Importing an instance from a path"
+                                     " requires a source node option",
                                      errors.ECODE_INVAL)
       else:
         self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
@@ -8883,7 +8884,7 @@ class LUInstanceCreate(LogicalUnit):
       # 'fake' LV disks with the old data, plus the new unique_id
       tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
       rename_to = []
-      for t_dsk, a_dsk in zip (tmp_disks, self.disks):
+      for t_dsk, a_dsk in zip(tmp_disks, self.disks):
        rename_to.append(t_dsk.logical_id)
        t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
        self.cfg.SetDiskID(t_dsk, pnode_name)
@@ -10823,7 +10824,7 @@ class LUInstanceSetParams(LogicalUnit):
       if msg:
         # Assume the primary node is unreachable and go ahead
         self.warn.append("Can't get info from primary node %s: %s" %
-                          (pnode, msg))
+                         (pnode, msg))
       elif not isinstance(pninfo.payload.get("memory_free", None), int):
         self.warn.append("Node data from primary node %s doesn't contain"
                          " free memory information" % pnode)
@@ -12140,7 +12141,6 @@ class LUGroupSetParams(LogicalUnit):
 
     return result
 
-
 class LUGroupRemove(LogicalUnit):
   HPATH = "group-remove"
   HTYPE = constants.HTYPE_GROUP
diff --git a/lib/compat.py b/lib/compat.py
index a1d35da55a4d1da8699f0c7bcc10366ea42ab19d..90799205798aecfd1e1e093ed188c0e0fa14cdf5 100644
--- a/lib/compat.py
+++ b/lib/compat.py
@@ -67,6 +67,7 @@ def _all(seq):
       return False
   return True
 
+
 def _any(seq):
   """Returns True if any element of the iterable are True.
 
@@ -75,6 +76,7 @@ def _any(seq):
       return True
   return False
 
+
 try:
   # pylint: disable-msg=E0601
   # pylint: disable-msg=W0622
@@ -89,6 +91,7 @@ try:
 except NameError:
   any = _any
 
+
 def partition(seq, pred=bool): # pylint: disable-msg=W0622
   """Partition a list in two, based on the given predicate.
 
diff --git a/lib/confd/__init__.py b/lib/confd/__init__.py
index 895884c35e4ecea5f4583728e99caa8cdfc1c3d3..6527a9310cc87bb0070ecde0a64ef0440776b3d5 100644
--- a/lib/confd/__init__.py
+++ b/lib/confd/__init__.py
@@ -50,4 +50,3 @@ def UnpackMagic(payload):
     raise errors.ConfdMagicError("UDP payload contains an unkown fourcc")
 
   return payload[_FOURCC_LEN:]
-
diff --git a/lib/confd/client.py b/lib/confd/client.py
index 2ca2ed8fb9c67e6d7f5424af391d5aca91504e59..aec2a7f5b0f0a61a7d3f77d50a29cace957a7730 100644
--- a/lib/confd/client.py
+++ b/lib/confd/client.py
@@ -333,7 +333,7 @@ class ConfdClient:
     elif peer_cnt < 5:
       return peer_cnt - 1
     else:
-      return int(peer_cnt/2) + 1
+      return int(peer_cnt / 2) + 1
 
   def WaitForReply(self, salt, timeout=constants.CONFD_CLIENT_EXPIRE_TIMEOUT):
     """Wait for replies to a given request.
 
diff --git a/lib/confd/querylib.py b/lib/confd/querylib.py
index 81ea2b29c831362811fa8f1ffeb5044e0178ef09..4ad51e78855e3bffbccd72042182b2744fcc7753 100644
--- a/lib/confd/querylib.py
+++ b/lib/confd/querylib.py
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2009, Google Inc.
+# Copyright (C) 2009 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
diff --git a/lib/confd/server.py b/lib/confd/server.py
index e3dc58e07ecd796b21e058cb55a11a841bf529af..74fb5199161d5c075353b6612a056b9311e5e301 100644
--- a/lib/confd/server.py
+++ b/lib/confd/server.py
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2009, Google Inc.
+# Copyright (C) 2009 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
diff --git a/lib/config.py b/lib/config.py
index 78c7b4ba8455302d3784963e709d641faed95e8e..df4f84fa6b79b0ec9f7e4ea73da4faf3f08a90d5 100644
--- a/lib/config.py
+++ b/lib/config.py
@@ -539,7 +539,6 @@ class ConfigWriter:
                       cluster.SimpleFillND(nodegroup.ndparams),
                       constants.NDS_PARAMETER_TYPES)
 
-
     # drbd minors check
     _, duplicates = self._UnlockedComputeDRBDMap()
     for node, minor, instance_a, instance_b in duplicates:
diff --git a/lib/constants.py b/lib/constants.py
index 39fa8d9152fe2dadfe301cda0237d7c74b8c3597..9e48997c66adf5e45060efec164812b1367a78c7 100644
--- a/lib/constants.py
+++ b/lib/constants.py
@@ -116,7 +116,7 @@ CPU_PINNING_ALL_XEN = "0-63"
 
 # Wipe
 DD_CMD = "dd"
-WIPE_BLOCK_SIZE = 1024**2
+WIPE_BLOCK_SIZE = 1024 ** 2
 MAX_WIPE_CHUNK = 1024 # 1GB
 MIN_WIPE_CHUNK_PERCENT = 10
 
@@ -139,7 +139,11 @@ IMPORT_EXPORT_DIR_MODE = 0755
 ADOPTABLE_BLOCKDEV_ROOT = "/dev/disk/"
 # keep RUN_GANETI_DIR first here, to make sure all get created when the node
 # daemon is started (this takes care of RUN_DIR being tmpfs)
-SUB_RUN_DIRS = [ RUN_GANETI_DIR, BDEV_CACHE_DIR, DISK_LINKS_DIR ]
+SUB_RUN_DIRS = [
+  RUN_GANETI_DIR,
+  BDEV_CACHE_DIR,
+  DISK_LINKS_DIR,
+  ]
 LOCK_DIR = _autoconf.LOCALSTATEDIR + "/lock"
 SSCONF_LOCK_FILE = LOCK_DIR + "/ganeti-ssconf.lock"
 # User-id pool lock directory
@@ -1079,6 +1083,7 @@ JOB_QUEUE_DIRS = [QUEUE_DIR, JOB_QUEUE_ARCHIVE_DIR]
 JOB_QUEUE_DIRS_MODE = SECURE_DIR_MODE
 JOB_ID_TEMPLATE = r"\d+"
+JOB_FILE_RE = re.compile(r"^job-(%s)$" % JOB_ID_TEMPLATE)
 
 # unchanged job return
 JOB_NOTCHANGED = "nochange"
@@ -1222,7 +1227,7 @@ RS_ALL = frozenset([
 #: Dictionary with special field cases and their verbose/terse formatting
 RSS_DESCRIPTION = {
   RS_UNKNOWN: ("(unknown)", "??"),
-  RS_NODATA: ("(nodata)", "?"),
+  RS_NODATA:  ("(nodata)", "?"),
   RS_OFFLINE: ("(offline)", "*"),
   RS_UNAVAIL: ("(unavail)", "-"),
   }
@@ -1254,6 +1259,8 @@ SS_MAINTAIN_NODE_HEALTH = "maintain_node_health"
 SS_UID_POOL = "uid_pool"
 SS_NODEGROUPS = "nodegroups"
 
+SS_FILE_PERMS = 0444
+
 # cluster wide default parameters
 DEFAULT_ENABLED_HYPERVISOR = HT_XEN_PVM
 
@@ -1454,7 +1461,7 @@ MAX_UDP_DATA_SIZE = 61440
 
 # User-id pool minimum/maximum acceptable user-ids.
 UIDPOOL_UID_MIN = 0
-UIDPOOL_UID_MAX = 2**32-1 # Assuming 32 bit user-ids
+UIDPOOL_UID_MAX = 2 ** 32 - 1 # Assuming 32 bit user-ids
 
 # Name or path of the pgrep command
 PGREP = "pgrep"
diff --git a/lib/errors.py b/lib/errors.py
index df743d7d108da33177a3a6cc14fe27305baa59a4..181288618d25bea24fd5c08435029bd49ad9ac06 100644
--- a/lib/errors.py
+++ b/lib/errors.py
@@ -482,4 +482,4 @@ def MaybeRaise(result):
   error = GetEncodedError(result)
   if error:
     (errcls, args) = error
-    raise errcls, args
+    raise errcls(args)
diff --git a/lib/http/client.py b/lib/http/client.py
index 4c7ad61485584a5f7e9c5e90534fb9c6ba753b2b..84cd0b7fa1e9d051ff3e56c7356428cac5f6a174 100644
--- a/lib/http/client.py
+++ b/lib/http/client.py
@@ -319,7 +319,7 @@ class HttpClientPool:
 
     """
     try:
-      pclient  = self._pool.pop(identity)
+      pclient = self._pool.pop(identity)
     except KeyError:
       # Need to create new client
       client = self._GetHttpClientCreator()(self._curl_config_fn)
diff --git a/lib/hypervisor/hv_base.py b/lib/hypervisor/hv_base.py
index 33222e8849198574553bc5d407dee24727558e64..074017f75940f7943cd372ba60109f252efa2dd7 100644
--- a/lib/hypervisor/hv_base.py
+++ b/lib/hypervisor/hv_base.py
@@ -414,11 +414,11 @@ class BaseHypervisor(object):
         key = splitfields[0].strip()
         val = splitfields[1].strip()
         if key == "MemTotal":
-          result["memory_total"] = int(val.split()[0])/1024
+          result["memory_total"] = int(val.split()[0]) / 1024
         elif key in ("MemFree", "Buffers", "Cached"):
-          sum_free += int(val.split()[0])/1024
+          sum_free += int(val.split()[0]) / 1024
         elif key == "Active":
-          result["memory_dom0"] = int(val.split()[0])/1024
+          result["memory_dom0"] = int(val.split()[0]) / 1024
     except (ValueError, TypeError), err:
       raise errors.HypervisorError("Failed to compute memory usage: %s" %
                                    (err,))
diff --git a/lib/hypervisor/hv_kvm.py b/lib/hypervisor/hv_kvm.py
index 9407f62a3b05b13799dfe19fbaa85fe786a57aee..d5d057fc63fe2a4f4efd988bc089c872dc9e3adf 100644
--- a/lib/hypervisor/hv_kvm.py
+++ b/lib/hypervisor/hv_kvm.py
@@ -268,7 +268,7 @@ class KVMHypervisor(hv_base.BaseHypervisor):
 
     arg_list = cmdline.split("\x00")
     while arg_list:
-      arg  = arg_list.pop(0)
+      arg = arg_list.pop(0)
       if arg == "-name":
         instance = arg_list.pop(0)
       elif arg == "-m":
@@ -527,7 +527,7 @@ class KVMHypervisor(hv_base.BaseHypervisor):
 
     """
     _, v_major, v_min, _ = self._GetKVMVersion()
-    pidfile  = self._InstancePidFile(instance.name)
+    pidfile = self._InstancePidFile(instance.name)
     kvm = constants.KVM_PATH
     kvm_cmd = [kvm]
     # used just by the vnc server, if enabled
@@ -722,6 +722,7 @@ class KVMHypervisor(hv_base.BaseHypervisor):
       kvm_cmd.extend(["-serial", "none"])
 
     spice_bind = hvp[constants.HV_KVM_SPICE_BIND]
+    spice_ip_version = None
     if spice_bind:
       if netutils.IsValidInterface(spice_bind):
         # The user specified a network interface, we have to figure out the IP
@@ -747,8 +748,11 @@ class KVMHypervisor(hv_base.BaseHypervisor):
                                        cluster_family)
         elif addresses[constants.IP4_VERSION]:
           spice_ip_version = constants.IP4_VERSION
-        else:
+        elif addresses[constants.IP6_VERSION]:
           spice_ip_version = constants.IP6_VERSION
+        else:
+          raise errors.HypervisorError("spice: unable to get an IP address"
+                                       " for %s" % (spice_bind))
 
         spice_address = addresses[spice_ip_version][0]
 
@@ -757,11 +761,10 @@
         # ValidateParameters checked it.
         spice_address = spice_bind
 
-      spice_arg = "addr=%s,ipv%s,port=%s" % (spice_address,
-                                             spice_ip_version,
-                                             instance.network_port)
-
-      spice_arg = "%s,disable-ticketing" % spice_arg
+      spice_arg = "addr=%s,port=%s,disable-ticketing" % (spice_address,
+                                                         instance.network_port)
+      if spice_ip_version:
+        spice_arg = "%s,ipv%s" % (spice_arg, spice_ip_version)
 
       logging.info("KVM: SPICE will listen on port %s", instance.network_port)
       kvm_cmd.extend(["-spice", spice_arg])
diff --git a/lib/hypervisor/hv_xen.py b/lib/hypervisor/hv_xen.py
index 00445abca4ada8f40d1270f026a78addc194efa1..5a772d0b58aa87a474ec8a161d33e51c45f47fbe 100644
--- a/lib/hypervisor/hv_xen.py
+++ b/lib/hypervisor/hv_xen.py
@@ -163,7 +163,7 @@ class XenHypervisor(hv_base.BaseHypervisor):
     @return: tuple (name, id, memory, vcpus, stat, times)
 
     """
-    xm_list = self._GetXMList(instance_name=="Domain-0")
+    xm_list = self._GetXMList(instance_name == "Domain-0")
     result = None
     for data in xm_list:
       if data[0] == instance_name:
diff --git a/lib/jqueue.py b/lib/jqueue.py
index 743152e80f9492d6cdffc438a5e43ead79f7c2fc..bf65d5bc71fe6c4ce870ef8d8617ece5173bbaec 100644
--- a/lib/jqueue.py
+++ b/lib/jqueue.py
@@ -31,7 +31,6 @@ used by all other classes in this module.
 
 import logging
 import errno
-import re
 import time
 import weakref
 import threading
@@ -1491,11 +1490,7 @@ def _RequireOpenQueue(fn):
 class JobQueue(object):
   """Queue used to manage the jobs.
 
-  @cvar _RE_JOB_FILE: regex matching the valid job file names
-
   """
-  _RE_JOB_FILE = re.compile(r"^job-(%s)$" % constants.JOB_ID_TEMPLATE)
-
   def __init__(self, context):
     """Constructor for JobQueue.
 
@@ -1846,7 +1841,8 @@ class JobQueue(object):
     return utils.PathJoin(constants.JOB_QUEUE_ARCHIVE_DIR,
                           cls._GetArchiveDirectory(job_id), "job-%s" % job_id)
 
-  def _GetJobIDsUnlocked(self, sort=True):
+  @staticmethod
+  def _GetJobIDsUnlocked(sort=True):
     """Return all known job IDs.
 
    The method only looks at disk because it's a requirement that all
@@ -1861,7 +1857,7 @@ class JobQueue(object):
     """
     jlist = []
     for filename in utils.ListVisibleFiles(constants.QUEUE_DIR):
-      m = self._RE_JOB_FILE.match(filename)
+      m = constants.JOB_FILE_RE.match(filename)
       if m:
         jlist.append(m.group(1))
     if sort:
diff --git a/lib/netutils.py b/lib/netutils.py
index 11d5150102ca3434894a93cc8c55c3a3cd0a9dce..7a1b6d032013409eb417592f56dfc5195c2ccff7 100644
--- a/lib/netutils.py
+++ b/lib/netutils.py
@@ -58,7 +58,7 @@ _IP_FAMILY_RE = re.compile(r"(?P<family>inet6?)\s+(?P<ip>%s)/" % _IP_RE_TEXT,
 
 # Dict used to convert from a string representing an IP family to an IP
 # version
-_NAME_TO_IP_VER =  {
+_NAME_TO_IP_VER = {
   "inet": constants.IP4_VERSION,
   "inet6": constants.IP6_VERSION,
   }
@@ -417,9 +417,9 @@ class IPAddress(object):
     assert 0 <= prefix <= cls.iplen
     target_int = cls._GetIPIntFromString(subnet[0])
     # Convert prefix netmask to integer value of netmask
-    netmask_int = (2**cls.iplen)-1 ^ ((2**cls.iplen)-1 >> prefix)
+    netmask_int = (2 ** cls.iplen) - 1 ^ ((2 ** cls.iplen) - 1 >> prefix)
     # Calculate hostmask
-    hostmask_int = netmask_int ^ (2**cls.iplen)-1
+    hostmask_int = netmask_int ^ (2 ** cls.iplen) - 1
     # Calculate network address by and'ing netmask
     network_int = target_int & netmask_int
     # Calculate broadcast address by or'ing hostmask
diff --git a/lib/objects.py b/lib/objects.py
index 3ed05e9216739801b04880be410847de07b730ae..4e298c6a1761f97b5b1fcc04a293e7ec4da6e2f3 100644
--- a/lib/objects.py
+++ b/lib/objects.py
@@ -663,7 +663,7 @@ class Disk(ConfigObject):
 
     """
     if self.dev_type == constants.LD_LV:
-      val =  "<LogicalVolume(/dev/%s/%s" % self.logical_id
+      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
     elif self.dev_type in constants.LDS_DRBD:
       node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
       val = "<DRBD8("
@@ -790,7 +790,9 @@ class Instance(TaggableObject):
       node = self.primary_node
 
       if lvmap is None:
-        lvmap = { node : [] }
+        lvmap = {
+          node: [],
+          }
         ret = lvmap
       else:
         if not node in lvmap:
@@ -802,7 +804,7 @@ class Instance(TaggableObject):
 
     for dev in devs:
       if dev.dev_type == constants.LD_LV:
-        lvmap[node].append(dev.logical_id[0]+"/"+dev.logical_id[1])
+        lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])
 
       elif dev.dev_type in constants.LDS_DRBD:
         if dev.children:
diff --git a/lib/opcodes.py b/lib/opcodes.py
index 7fd66f6ac255c73eec0275919069ffeb6df9050b..14a132b1aefb2afb4506805c416c8099fb85ac62 100644
--- a/lib/opcodes.py
+++ b/lib/opcodes.py
@@ -1549,6 +1549,7 @@ class OpTagsDel(OpCode):
     ("name", ht.NoDefault, ht.TMaybeString, None),
     ]
 
+
 # Test opcodes
 class OpTestDelay(OpCode):
   """Sleeps for a configured amount of time.
 
diff --git a/lib/rapi/baserlib.py b/lib/rapi/baserlib.py
index 534ebae19e18687d2a1c8550ce61cfeaea46c0bc..5f43e0af4d89ef9a2021bd8313cd8386dfa7a220 100644
--- a/lib/rapi/baserlib.py
+++ b/lib/rapi/baserlib.py
@@ -53,7 +53,10 @@ def BuildUriList(ids, uri_format, uri_fields=("name", "uri")):
   (field_id, field_uri) = uri_fields
 
   def _MapId(m_id):
-    return { field_id: m_id, field_uri: uri_format % m_id, }
+    return {
+      field_id: m_id,
+      field_uri: uri_format % m_id,
+      }
 
   # Make sure the result is sorted, makes it nicer to look at and simplifies
   # unittests.
diff --git a/lib/rapi/rlib2.py b/lib/rapi/rlib2.py
index fc5c6ec20979e6ed1eb89af0053b8f828d1139f1..fb32e8090644722dd2fc3aff2db20a8afbbe89ee 100644
--- a/lib/rapi/rlib2.py
+++ b/lib/rapi/rlib2.py
@@ -664,7 +664,6 @@ def _ParseModifyGroupRequest(name, data):
     })
 
 
-
 class R_2_groups_name_modify(baserlib.R_Generic):
   """/2/groups/[group_name]/modify resource.
 
diff --git a/lib/runtime.py b/lib/runtime.py
index 4d5e3efc624861e138c08e393c4c0febbd26f540..6108e2671b6ab574e3770bd127452ef239fae81a 100644
--- a/lib/runtime.py
+++ b/lib/runtime.py
@@ -1,4 +1,5 @@
 #
+#
 
 # Copyright (C) 2010 Google Inc.
 #
@@ -186,4 +187,3 @@ def GetEnts(resolver=GetentResolver):
     _priv_lock.release()
 
   return _priv
-
diff --git a/lib/server/confd.py b/lib/server/confd.py
index b24a495c12895baefa5ed1b164ff67f6ee472da1..fe5f7e11b62f4559cc509611d65cf837e7529f35 100644
--- a/lib/server/confd.py
+++ b/lib/server/confd.py
@@ -82,7 +82,7 @@ class ConfdAsyncUDPServer(daemon.AsyncUDPSocket):
       logging.debug(err)
       return
 
-    answer =  self.processor.ExecQuery(query, ip, port)
+    answer = self.processor.ExecQuery(query, ip, port)
     if answer is not None:
       try:
         self.enqueue_send(ip, port, confd.PackMagic(answer))
diff --git a/lib/server/masterd.py b/lib/server/masterd.py
index 99f199577d36eb3291b9e42280455a051e51b3e5..17eabf85fefde7bbe7a717eb2a1a10903f5f6268 100644
--- a/lib/server/masterd.py
+++ b/lib/server/masterd.py
@@ -113,6 +113,7 @@ class MasterClientHandler(daemon.AsyncTerminatedMessageStream):
 
   """
   _MAX_UNHANDLED = 1
+
   def __init__(self, server, connected_socket, client_address, family):
     daemon.AsyncTerminatedMessageStream.__init__(self, connected_socket,
                                                  client_address,
diff --git a/lib/server/noded.py b/lib/server/noded.py
index 527b261c4f47c7e29012f718316067aa32ec9600..580467ae975c3b009369fa02fff00c4d15f09bd4 100644
--- a/lib/server/noded.py
+++ b/lib/server/noded.py
@@ -711,7 +711,6 @@ class NodeHttpServer(http.server.HttpServer):
 
     """
     return backend.DemoteFromMC()
 
-
   @staticmethod
   def perspective_node_powercycle(params):
     """Tries to powercycle the nod.
 
     """
     hypervisor_type = params[0]
     return backend.PowercycleNode(hypervisor_type)
 
-
   # cluster --------------------------
 
   @staticmethod
diff --git a/lib/ssconf.py b/lib/ssconf.py
index c1ef0966e36762e9df964ace06f9e4f6f93cba05..9cc2a345022ff3f33e5962e09d96bf1a6f8991dc 100644
--- a/lib/ssconf.py
+++ b/lib/ssconf.py
@@ -349,7 +349,8 @@ class SimpleStore(object):
       if len(value) > self._MAX_SIZE:
         raise errors.ConfigurationError("ssconf file %s above maximum size" %
                                         name)
-      utils.WriteFile(self.KeyToFilename(name), data=value, mode=0444)
+      utils.WriteFile(self.KeyToFilename(name), data=value,
+                      mode=constants.SS_FILE_PERMS)
     finally:
       ssconf_lock.Unlock()
diff --git a/lib/tools/__init__.py b/lib/tools/__init__.py
index 99df0ed0d409f72fc80f5bffbd85cf5863f0cfbe..520fe8f9013d93fda86deffda867b2fcc55f07c7 100644
--- a/lib/tools/__init__.py
+++ b/lib/tools/__init__.py
@@ -1,3 +1,6 @@
+#
+#
+
 # Copyright (C) 2011 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
diff --git a/lib/tools/ensure_dirs.py b/lib/tools/ensure_dirs.py
index 8de0cbf4dc827e0c1375d49792a0dc96677b7b21..7abcce2a2bcf556326fd9b8df9663bac52a015b1 100644
--- a/lib/tools/ensure_dirs.py
+++ b/lib/tools/ensure_dirs.py
@@ -1,3 +1,6 @@
+#
+#
+
 # Copyright (C) 2011 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
@@ -25,15 +28,25 @@ import os.path
 import optparse
 import sys
 import stat
+import logging
 
 from ganeti import constants
 from ganeti import errors
 from ganeti import runtime
 from ganeti import ssconf
+from ganeti import utils
+from ganeti import cli
+
+
+(DIR,
+ FILE,
+ QUEUE_DIR) = range(1, 4)
 
-(DIR, FILE) = range(2)
-
-ALL_TYPES = frozenset([DIR, FILE])
+ALL_TYPES = frozenset([
+  DIR,
+  FILE,
+  QUEUE_DIR,
+  ])
 
 
 class EnsureError(errors.GenericError):
@@ -43,7 +56,7 @@ class EnsureError(errors.GenericError):
 
 
 def EnsurePermission(path, mode, uid=-1, gid=-1, must_exist=True,
-                     _chmod_fn=os.chmod, _chown_fn=os.chown):
+                     _chmod_fn=os.chmod, _chown_fn=os.chown, _stat_fn=os.stat):
   """Ensures that given path has given mode.
 
   @param path: The path to the file
@@ -55,21 +68,32 @@ def EnsurePermission(path, mode, uid=-1, gid=-1, must_exist=True,
   @param _chown_fn: chown function to use (unittest only)
 
   """
+  logging.debug("Checking %s", path)
   try:
-    _chmod_fn(path, mode)
+    st = _stat_fn(path)
+
+    fmode = stat.S_IMODE(st[stat.ST_MODE])
+    if fmode != mode:
+      logging.debug("Changing mode of %s from %#o to %#o", path, fmode, mode)
+      _chmod_fn(path, mode)
 
     if max(uid, gid) > -1:
-      _chown_fn(path, uid, gid)
+      fuid = st[stat.ST_UID]
+      fgid = st[stat.ST_GID]
+      if fuid != uid or fgid != gid:
+        logging.debug("Changing owner of %s from UID %s/GID %s to"
+                      " UID %s/GID %s", path, fuid, fgid, uid, gid)
+        _chown_fn(path, uid, gid)
   except EnvironmentError, err:
     if err.errno == errno.ENOENT:
       if must_exist:
-        raise EnsureError("Path %s does not exists, but should" % path)
+        raise EnsureError("Path %s should exist, but does not" % path)
     else:
-      raise EnsureError("Error while changing permission on %s: %s" %
+      raise EnsureError("Error while changing permissions on %s: %s" %
                         (path, err))
 
 
-def EnsureDir(path, mode, uid, gid, _stat_fn=os.lstat, _mkdir_fn=os.mkdir,
+def EnsureDir(path, mode, uid, gid, _lstat_fn=os.lstat, _mkdir_fn=os.mkdir,
               _ensure_fn=EnsurePermission):
   """Ensures that given path is a dir and has given mode, uid and gid set.
 
@@ -77,23 +101,23 @@ def EnsureDir(path, mode, uid, gid, _stat_fn=os.lstat, _mkdir_fn=os.mkdir,
   @param mode: The mode of the file
   @param uid: The uid of the owner of this file
   @param gid: The gid of the owner of this file
-  @param _stat_fn: Stat function to use (unittest only)
+  @param _lstat_fn: Stat function to use (unittest only)
   @param _mkdir_fn: mkdir function to use (unittest only)
   @param _ensure_fn: ensure function to use (unittest only)
 
   """
+  logging.debug("Checking directory %s", path)
   try:
     # We don't want to follow symlinks
-    st_mode = _stat_fn(path)[stat.ST_MODE]
-
-    if not stat.S_ISDIR(st_mode):
-      raise EnsureError("Path %s is expected to be a directory, but it's not" %
-                        path)
+    st = _lstat_fn(path)
   except EnvironmentError, err:
-    if err.errno == errno.ENOENT:
-      _mkdir_fn(path)
-    else:
-      raise EnsureError("Error while do a stat() on %s: %s" % (path, err))
+    if err.errno != errno.ENOENT:
+      raise EnsureError("stat(2) on %s failed: %s" % (path, err))
+    _mkdir_fn(path)
+  else:
+    if not stat.S_ISDIR(st[stat.ST_MODE]):
+      raise EnsureError("Path %s is expected to be a directory, but isn't" %
+                        path)
 
   _ensure_fn(path, mode, uid=uid, gid=gid)
 
@@ -113,6 +137,8 @@ def RecursiveEnsure(path, uid, gid, dir_perm, file_perm):
   assert os.path.isabs(path), "Path %s is not absolute" % path
   assert os.path.isdir(path), "Path %s is not a dir" % path
 
+  logging.debug("Recursively processing %s", path)
+
   for root, dirs, files in os.walk(path):
     for subdir in dirs:
       EnsurePermission(os.path.join(root, subdir), dir_perm, uid=uid, gid=gid)
@@ -122,6 +148,20 @@ def RecursiveEnsure(path, uid, gid, dir_perm, file_perm):
                        gid=gid)
 
 
+def EnsureQueueDir(path, mode, uid, gid):
+  """Sets the correct permissions on all job files in the queue.
+
+  @param path: Directory path
+  @param mode: Wanted file mode
+  @param uid: Wanted user ID
+  @param gid: Wanted group ID
+
+  """
+  for filename in utils.ListVisibleFiles(path):
+    if constants.JOB_FILE_RE.match(filename):
+      EnsurePermission(utils.PathJoin(path, filename), mode, uid=uid, gid=gid)
+
+
 def ProcessPath(path):
   """Processes a path component.
 
@@ -132,10 +172,13 @@ def ProcessPath(path):
 
   assert pathtype in ALL_TYPES
 
-  if pathtype == DIR:
+  if pathtype in (DIR, QUEUE_DIR):
     # No additional parameters
     assert len(path[5:]) == 0
-    EnsureDir(pathname, mode, uid, gid)
+    if pathtype == DIR:
+      EnsureDir(pathname, mode, uid, gid)
+    elif pathtype == QUEUE_DIR:
+      EnsureQueueDir(pathname, mode, uid, gid)
   elif pathtype == FILE:
     (must_exist, ) = path[5:]
     EnsurePermission(pathname, mode, uid=uid, gid=gid, must_exist=must_exist)
@@ -172,11 +215,16 @@ def GetPaths():
 
   ss = ssconf.SimpleStore()
   for ss_path in ss.GetFileList():
-    paths.append((ss_path, FILE, 0400, getent.noded_uid, 0, False))
+    paths.append((ss_path, FILE, constants.SS_FILE_PERMS,
+                  getent.noded_uid, 0, False))
 
   paths.extend([
     (constants.QUEUE_DIR, DIR, 0700, getent.masterd_uid,
      getent.masterd_gid),
+    (constants.QUEUE_DIR, QUEUE_DIR, 0600, getent.masterd_uid,
+     getent.masterd_gid),
+    (constants.JOB_QUEUE_LOCK_FILE, FILE, 0600,
+     getent.masterd_uid, getent.masterd_gid, False),
     (constants.JOB_QUEUE_SERIAL_FILE, FILE, 0600,
     getent.masterd_uid, getent.masterd_gid, False),
    (constants.JOB_QUEUE_ARCHIVE_DIR, DIR, 0700,
@@ -214,6 +262,26 @@ def GetPaths():
   return tuple(paths)
 
 
+def SetupLogging(opts):
+  """Configures the logging module.
+
+  """
+  formatter = logging.Formatter("%(asctime)s: %(message)s")
+
+  stderr_handler = logging.StreamHandler()
+  stderr_handler.setFormatter(formatter)
+  if opts.debug:
+    stderr_handler.setLevel(logging.NOTSET)
+  elif opts.verbose:
+    stderr_handler.setLevel(logging.INFO)
+  else:
+    stderr_handler.setLevel(logging.WARNING)
+
+  root_logger = logging.getLogger("")
+  root_logger.setLevel(logging.NOTSET)
+  root_logger.addHandler(stderr_handler)
+
+
 def ParseOptions():
   """Parses the options passed to the program.
 
@@ -224,9 +292,11 @@ def ParseOptions():
   parser = optparse.OptionParser(usage="%%prog [--full-run]",
                                  prog=program)
+  parser.add_option(cli.DEBUG_OPT)
+  parser.add_option(cli.VERBOSE_OPT)
   parser.add_option("--full-run", "-f", dest="full_run", action="store_true",
-                    default=False, help=("Make a full run and collect"
-                                         " additional files (time consuming)"))
+                    default=False, help=("Make a full run and set permissions"
+                                         " on archived jobs (time consuming)"))
 
   return parser.parse_args()
 
@@ -235,9 +305,15 @@ def Main():
   """Main routine.
 
   """
-  getent = runtime.GetEnts()
   (opts, _) = ParseOptions()
 
+  SetupLogging(opts)
+
+  if opts.full_run:
+    logging.info("Running in full mode")
+
+  getent = runtime.GetEnts()
+
   try:
     for path in GetPaths():
       ProcessPath(path)
@@ -246,7 +322,7 @@ def Main():
       RecursiveEnsure(constants.JOB_QUEUE_ARCHIVE_DIR, getent.masterd_uid,
                       getent.masterd_gid, 0700, 0600)
   except EnsureError, err:
-    print >> sys.stderr, "An error occurred while ensure permissions:", err
+    logging.error("An error occurred while setting permissions: %s", err)
     return constants.EXIT_FAILURE
 
   return constants.EXIT_SUCCESS
diff --git a/lib/utils/io.py b/lib/utils/io.py
index 413a119c7049c1b4d8016d3b26013079022c60a0..c64c125d1de97f65dbaf168b9116c7d8133bb69f 100644
--- a/lib/utils/io.py
+++ b/lib/utils/io.py
@@ -511,7 +511,7 @@ def TailFile(fname, lines=20):
   try:
     fd.seek(0, 2)
     pos = fd.tell()
-    pos = max(0, pos-4096)
+    pos = max(0, pos - 4096)
     fd.seek(pos, 0)
     raw_data = fd.read()
   finally:
diff --git a/lib/utils/process.py b/lib/utils/process.py
index 8ba95b41ac507311bf1223d8d8a758a580dc94d3..78937610127153058cbc773a202a63666401a417 100644
--- a/lib/utils/process.py
+++ b/lib/utils/process.py
@@ -84,7 +84,6 @@ class RunResult(object):
   __slots__ = ["exit_code", "signal", "stdout", "stderr",
                "failed", "fail_reason", "cmd"]
 
-
   def __init__(self, exit_code, signal_, stdout, stderr, cmd, timeout_action,
                timeout):
     self.cmd = cmd
diff --git a/lib/utils/retry.py b/lib/utils/retry.py
index c8959bf51edda4347cd0ede7801920453223c929..4e36332e089c2e338be697eb3f37e6baa8afe943 100644
--- a/lib/utils/retry.py
+++ b/lib/utils/retry.py
@@ -206,6 +206,7 @@ def SimpleRetry(expected, fn, delay, timeout, args=None, wait_fn=time.sleep,
 
   """
   rdict = {}
+
   def helper(*innerargs):
     # pylint: disable-msg=W0142
     result = rdict["result"] = fn(*innerargs)
diff --git a/lib/utils/text.py b/lib/utils/text.py
index bfce363d5a8704cef4632e3869190511d1c009e5..a4c2777ecda452f022cd63551be8a89b36f1846d 100644
--- a/lib/utils/text.py
+++ b/lib/utils/text.py
@@ -338,7 +338,7 @@ def SafeEncode(text):
   resu = ""
   for char in text:
     c = ord(char)
-    if char == "\t": 
+    if char == "\t":
       resu += r"\t"
     elif char == "\n":
       resu += r"\n"
diff --git a/lib/watcher/__init__.py b/lib/watcher/__init__.py
index a305fe4792721d7dc58da564cbfc80ef6e889e4a..323283b5b23a648a9ce9db3694aafeed3f48237d 100644
--- a/lib/watcher/__init__.py
+++ b/lib/watcher/__init__.py
@@ -363,8 +363,12 @@ def ParseOptions():
                           " 6 hours)")
   parser.add_option("--ignore-pause", dest="ignore_pause",
                     default=False, action="store_true",
                     help="Ignore cluster pause setting")
-  parser.add_option("--wait-children", dest="wait_children", default=False,
+  parser.add_option("--wait-children", dest="wait_children",
                     action="store_true", help="Wait for child processes")
+  parser.add_option("--no-wait-children", dest="wait_children",
+                    action="store_false", help="Don't wait for child processes")
+  # See optparse documentation for why default values are not set by options
+  parser.set_defaults(wait_children=True)
   options, args = parser.parse_args()
   options.job_age = cli.ParseTimespec(options.job_age)
diff --git a/lib/watcher/nodemaint.py b/lib/watcher/nodemaint.py
index 2cf5a4b23cb90dc762bfe41370138d8316cf363d..a00fc9b19fd07d457c4572604569ce06e6e2b78b 100644
--- a/lib/watcher/nodemaint.py
+++ b/lib/watcher/nodemaint.py
@@ -131,9 +131,9 @@ class NodeMaintenance(object):
 
     """
     my_name = netutils.Hostname.GetSysName()
-    req = confd.client.ConfdClientRequest(type=
-                                          constants.CONFD_REQ_NODE_ROLE_BYNAME,
-                                          query=my_name)
+    req = \
+      confd.client.ConfdClientRequest(type=constants.CONFD_REQ_NODE_ROLE_BYNAME,
+                                      query=my_name)
     self.confd_client.SendRequest(req, async=False, coverage=-1)
     timed_out, _, _ = self.confd_client.WaitForReply(req.rsalt)
     if not timed_out:
diff --git a/man/gnt-backup.rst b/man/gnt-backup.rst
index 74bf9158079052662ac42de087c02f89758bed51..41889fd4245f1213078f3bd6ea26ec309115eb3d 100644
--- a/man/gnt-backup.rst
+++ b/man/gnt-backup.rst
@@ -179,6 +179,9 @@ instance allocator documentation.
 The optional second value of the ``--node`` is used for the drbd
 template and specifies the remote node.
 
+The ``--src-dir`` option allows importing instances from a directory
+below ``@CUSTOM_EXPORT_DIR@``.
+
 Since many of the parameters are by default read from the exported
 instance information and used as such, the new instance will have
 all parameters explicitly specified, the opposite of a newly added
diff --git a/man/gnt-instance.rst b/man/gnt-instance.rst
index 1b461fe8b716d51f95fb6d532121cff179a7cb5e..5d906a40399a1bec58936ba33251b34faa492bd3 100644
--- a/man/gnt-instance.rst
+++ b/man/gnt-instance.rst
@@ -653,7 +653,7 @@ REMOVE
 ^^^^^^
 
 **remove** [--ignore-failures] [--shutdown-timeout=*N*] [--submit]
-{*instance*}
+[--force] {*instance*}
 
 Remove an instance. This will remove all data from the instance and
 there is *no way back*. If you are not sure if you use an instance
@@ -674,6 +674,8 @@ The ``--submit`` option is used to send the job to the master daemon
 but not wait for its completion. The job ID will be shown so that it
 can be examined via **gnt-job info**.
 
+The ``--force`` option is used to skip the interactive confirmation.
+
 Example::
 
     # gnt-instance remove instance1.example.com
diff --git a/test/ganeti.tools.ensure_dirs_unittest.py b/test/ganeti.tools.ensure_dirs_unittest.py
index 6e0dff3c7321c01b4ff459928c9ad28c4adae348..f619b817d8bbeae96ff9c8eee81063ecd347986d 100755
--- a/test/ganeti.tools.ensure_dirs_unittest.py
+++ b/test/ganeti.tools.ensure_dirs_unittest.py
@@ -31,91 +31,126 @@ from ganeti.tools import ensure_dirs
 
 import testutils
 
 
+def _MockStatResult(cb, mode, uid, gid):
+  def _fn(path):
+    if cb:
+      cb()
+    return {
+      stat.ST_MODE: mode,
+      stat.ST_UID: uid,
+      stat.ST_GID: gid,
+      }
+  return _fn
+
+
+def _RaiseNoEntError():
+  raise EnvironmentError(errno.ENOENT, "not found")
+
+
+def _OtherStatRaise():
+  raise EnvironmentError()
+
+
 class TestEnsureDirsFunctions(unittest.TestCase):
-  def _NoopMkdir(self, _):
-    self.mkdir_called = True
-
-  @staticmethod
-  def _MockStatResult(mode, pre_fn=lambda: 0):
-    def _fn(path):
-      pre_fn()
-      return {stat.ST_MODE: mode}
-    return _fn
+  UID_A = 16024
+  UID_B = 25850
+  GID_A = 14028
+  GID_B = 29801
 
-  def _VerifyEnsure(self, path, mode, uid=-1, gid=-1):
-    self.assertEqual(path, "/ganeti-qa-non-test")
-    self.assertEqual(mode, 0700)
-    self.assertEqual(uid, 0)
-    self.assertEqual(gid, 0)
+  def setUp(self):
+    self._chown_calls = []
+    self._chmod_calls = []
+    self._mkdir_calls = []
+
+  def tearDown(self):
+    self.assertRaises(IndexError, self._mkdir_calls.pop)
+    self.assertRaises(IndexError, self._chmod_calls.pop)
+    self.assertRaises(IndexError, self._chown_calls.pop)
 
-  @staticmethod
-  def _RaiseNoEntError():
-    noent_error = EnvironmentError()
-    noent_error.errno = errno.ENOENT
-    raise noent_error
+  def _FakeMkdir(self, path):
+    self._mkdir_calls.append(path)
 
-  @staticmethod
-  def _OtherStatRaise():
-    raise EnvironmentError()
+  def _FakeChown(self, path, uid, gid):
+    self._chown_calls.append((path, uid, gid))
 
-  def _ChmodWrapper(self, pre_fn=lambda: 0):
+  def _ChmodWrapper(self, cb):
     def _fn(path, mode):
-      self.chmod_called = True
-      pre_fn()
+      self._chmod_calls.append((path, mode))
+      if cb:
+        cb()
     return _fn
 
-  def _NoopChown(self, path, uid, gid):
-    self.chown_called = True
+  def _VerifyEnsure(self, path, mode, uid=-1, gid=-1):
+    self.assertEqual(path, "/ganeti-qa-non-test")
+    self.assertEqual(mode, 0700)
+    self.assertEqual(uid, self.UID_A)
+    self.assertEqual(gid, self.GID_A)
 
   def testEnsureDir(self):
-    is_dir_stat = self._MockStatResult(stat.S_IFDIR)
-    not_dir_stat = self._MockStatResult(0)
-    non_exist_stat = self._MockStatResult(stat.S_IFDIR,
-                                          pre_fn=self._RaiseNoEntError)
-    other_stat_raise = self._MockStatResult(stat.S_IFDIR,
-                                            pre_fn=self._OtherStatRaise)
+    is_dir_stat = _MockStatResult(None, stat.S_IFDIR, 0, 0)
+    ensure_dirs.EnsureDir("/ganeti-qa-non-test", 0700, self.UID_A, self.GID_A,
+                          _lstat_fn=is_dir_stat, _ensure_fn=self._VerifyEnsure)
 
+  def testEnsureDirErrors(self):
     self.assertRaises(ensure_dirs.EnsureError, ensure_dirs.EnsureDir,
                       "/ganeti-qa-non-test", 0700, 0, 0,
-                      _stat_fn=not_dir_stat)
+                      _lstat_fn=_MockStatResult(None, 0, 0, 0))
+    self.assertRaises(IndexError, self._mkdir_calls.pop)
+
+    other_stat_raise = _MockStatResult(_OtherStatRaise, stat.S_IFDIR, 0, 0)
     self.assertRaises(ensure_dirs.EnsureError, ensure_dirs.EnsureDir,
                       "/ganeti-qa-non-test", 0700, 0, 0,
-                      _stat_fn=other_stat_raise)
-    self.mkdir_called = False
-    ensure_dirs.EnsureDir("/ganeti-qa-non-test", 0700, 0, 0,
-                          _stat_fn=non_exist_stat, _mkdir_fn=self._NoopMkdir,
+                      _lstat_fn=other_stat_raise)
+    self.assertRaises(IndexError, self._mkdir_calls.pop)
+
+    non_exist_stat = _MockStatResult(_RaiseNoEntError, stat.S_IFDIR, 0, 0)
+    ensure_dirs.EnsureDir("/ganeti-qa-non-test", 0700, self.UID_A, self.GID_A,
+                          _lstat_fn=non_exist_stat, _mkdir_fn=self._FakeMkdir,
                           _ensure_fn=self._VerifyEnsure)
-    self.assertTrue(self.mkdir_called)
-    self.mkdir_called = False
-    ensure_dirs.EnsureDir("/ganeti-qa-non-test", 0700, 0, 0,
-                          _stat_fn=is_dir_stat, _ensure_fn=self._VerifyEnsure)
-    self.assertFalse(self.mkdir_called)
-
-  def testEnsurePermission(self):
-    noent_chmod_fn = self._ChmodWrapper(pre_fn=self._RaiseNoEntError)
+    self.assertEqual(self._mkdir_calls.pop(0), "/ganeti-qa-non-test")
+
+  def testEnsurePermissionNoEnt(self):
     self.assertRaises(ensure_dirs.EnsureError,
                       ensure_dirs.EnsurePermission,
                       "/ganeti-qa-non-test", 0600,
-                      _chmod_fn=noent_chmod_fn)
-    self.chmod_called = False
+                      _chmod_fn=NotImplemented, _chown_fn=NotImplemented,
+                      _stat_fn=_MockStatResult(_RaiseNoEntError, 0, 0, 0))
+
+  def testEnsurePermissionNoEntMustNotExist(self):
     ensure_dirs.EnsurePermission("/ganeti-qa-non-test", 0600, must_exist=False,
-                                 _chmod_fn=noent_chmod_fn)
-    self.assertTrue(self.chmod_called)
+                                 _chmod_fn=NotImplemented,
+                                 _chown_fn=NotImplemented,
+                                 _stat_fn=_MockStatResult(_RaiseNoEntError,
+                                                          0, 0, 0))
+
+  def testEnsurePermissionOtherErrorMustNotExist(self):
     self.assertRaises(ensure_dirs.EnsureError,
                       ensure_dirs.EnsurePermission,
                       "/ganeti-qa-non-test", 0600, must_exist=False,
-                      _chmod_fn=self._ChmodWrapper(pre_fn=self._OtherStatRaise))
-    self.chmod_called = False
-    self.chown_called = False
+                      _chmod_fn=NotImplemented, _chown_fn=NotImplemented,
+                      _stat_fn=_MockStatResult(_OtherStatRaise, 0, 0, 0))
+
+  def testEnsurePermissionNoChanges(self):
+    ensure_dirs.EnsurePermission("/ganeti-qa-non-test", 0600,
+                                 _stat_fn=_MockStatResult(None, 0600, 0, 0),
+                                 _chmod_fn=self._ChmodWrapper(None),
+                                 _chown_fn=self._FakeChown)
+
+  def testEnsurePermissionChangeMode(self):
+    ensure_dirs.EnsurePermission("/ganeti-qa-non-test", 0444,
+                                 _stat_fn=_MockStatResult(None, 0600, 0, 0),
+                                 _chmod_fn=self._ChmodWrapper(None),
+                                 _chown_fn=self._FakeChown)
+    self.assertEqual(self._chmod_calls.pop(0), ("/ganeti-qa-non-test", 0444))
+
+  def testEnsurePermissionSetUidGid(self):
     ensure_dirs.EnsurePermission("/ganeti-qa-non-test", 0600,
-                                 _chmod_fn=self._ChmodWrapper(),
-                                 _chown_fn=self._NoopChown)
-    self.assertTrue(self.chmod_called)
-    self.assertFalse(self.chown_called)
-    self.chmod_called = False
-    ensure_dirs.EnsurePermission("/ganeti-qa-non-test", 0600, uid=1, gid=1,
-                                 _chmod_fn=self._ChmodWrapper(),
-                                 _chown_fn=self._NoopChown)
-    self.assertTrue(self.chmod_called)
-    self.assertTrue(self.chown_called)
+                                 uid=self.UID_B, gid=self.GID_B,
+                                 _stat_fn=_MockStatResult(None, 0600,
+                                                          self.UID_A,
+                                                          self.GID_A),
+                                 _chmod_fn=self._ChmodWrapper(None),
+                                 _chown_fn=self._FakeChown)
+    self.assertEqual(self._chown_calls.pop(0),
+                     ("/ganeti-qa-non-test", self.UID_B, self.GID_B))
 
   def testPaths(self):
     paths = [(path[0], path[1]) for path in ensure_dirs.GetPaths()]
diff --git a/tools/burnin b/tools/burnin
index 7d28c36ad21eee49b42d293e65edd9eb0b18ff09..8ab7a15f4a0a33f27c5b6e0e138c91ce0a7e4cc4 100755
--- a/tools/burnin
+++ b/tools/burnin
@@ -51,6 +51,7 @@ LOG_HEADERS = {
   2: ""
   }
 
+
 class InstanceDown(Exception):
   """The checked instance was not up"""
 
@@ -73,8 +74,8 @@ def Log(msg, *args, **kwargs):
   """
   if args:
     msg = msg % args
-  indent = kwargs.get('indent', 0)
-  sys.stdout.write("%*s%s%s\n" % (2*indent, "",
+  indent = kwargs.get("indent", 0)
+  sys.stdout.write("%*s%s%s\n" % (2 * indent, "",
                                   LOG_HEADERS.get(indent, "  "), msg))
   sys.stdout.flush()
 
@@ -564,8 +565,8 @@ class Burner(object):
       Log(msg, indent=2)
 
       op = opcodes.OpInstanceCreate(instance_name=instance,
-                                    disks = [ {"size": size}
-                                              for size in self.disk_size],
+                                    disks=[{"size": size}
+                                           for size in self.disk_size],
                                     disk_template=self.opts.disk_template,
                                     nics=self.opts.nics,
                                     mode=constants.INSTANCE_CREATE,
@@ -719,8 +720,8 @@ class Burner(object):
                                         ignore_failures=True)
       imp_dir = utils.PathJoin(constants.EXPORT_DIR, full_name)
       imp_op = opcodes.OpInstanceCreate(instance_name=instance,
-                                        disks = [ {"size": size}
-                                                  for size in self.disk_size],
+                                        disks=[{"size": size}
+                                               for size in self.disk_size],
                                         disk_template=self.opts.disk_template,
                                         nics=self.opts.nics,
                                         mode=constants.INSTANCE_IMPORT,
@@ -996,7 +997,7 @@ class Burner(object):
     if opts.do_replace1 and opts.disk_template in constants.DTS_INT_MIRROR:
       self.BurnReplaceDisks1D8()
     if (opts.do_replace2 and len(self.nodes) > 2 and
-        opts.disk_template in constants.DTS_INT_MIRROR) :
+        opts.disk_template in constants.DTS_INT_MIRROR):
       self.BurnReplaceDisks2()
 
     if (opts.disk_template in constants.DTS_GROWABLE and
diff --git a/tools/cfgupgrade b/tools/cfgupgrade
index 20486cb8fe24eb59bf51c81b839f44ff750f548b..7197d24840b6ed1be71718e60d4119c2de648e93 100755
--- a/tools/cfgupgrade
+++ b/tools/cfgupgrade
@@ -248,6 +248,9 @@ def main():
   del cfg
   logging.info("File loaded successfully")
 
+  cli.ToStderr("Configuration successfully upgraded for version %s.",
+               constants.RELEASE_VERSION)
+
 
 if __name__ == "__main__":
   main()
diff --git a/tools/cluster-merge b/tools/cluster-merge
index 9b62f6f87d36155fdddcef6ecf9d7d06cdab7cfd..7d7091f7749c311f9e658f60b8938583ad8e6b84 100755
--- a/tools/cluster-merge
+++ b/tools/cluster-merge
@@ -81,6 +81,12 @@ RESTART_OPT = cli.cli_option("--restart", default=_RESTART_ALL,
                              " same name (One of: %s/%s/%s)" %
                              _RESTART_CHOICES))
 
+SKIP_STOP_INSTANCES_OPT = \
+  cli.cli_option("--skip-stop-instances", default=True, action="store_false",
+                 dest="stop_instances",
+                 help=("Don't stop the instances on the clusters, just check "
+                       "that none is running"))
+
 
 def Flatten(unflattened_list):
   """Flattens a list.
 
@@ -124,13 +130,22 @@ class Merger(object):
   """Handling the merge.
 
   """
-  def __init__(self, clusters, pause_period, groups, restart, params):
+  RUNNING_STATUSES = frozenset([
+    constants.INSTST_RUNNING,
+    constants.INSTST_ERRORUP,
+    ])
+
+  def __init__(self, clusters, pause_period, groups, restart, params,
+               stop_instances):
     """Initialize object with sane defaults and infos required.
 
     @param clusters: The list of clusters to merge in
     @param pause_period: The time watcher shall be disabled for
     @param groups: How to handle group conflicts
    @param restart: How to handle instance restart
+    @param stop_instances: Indicates whether the instances must be stopped
+                           (True) or if the Merger must only check if no
+                           instances are running on the mergee clusters (False)
 
     """
     self.merger_data = []
@@ -142,10 +157,10 @@ class Merger(object):
     self.groups = groups
     self.restart = restart
     self.params = params
+    self.stop_instances = stop_instances
     if self.restart == _RESTART_UP:
       raise NotImplementedError
 
-
   def Setup(self):
     """Sets up our end so we can do the merger.
 
@@ -207,7 +222,7 @@ class Merger(object):
       for node in data.nodes:
         result = self._RunCmd(node, ("cat >> %s << '!EOF.'\n%s!EOF.\n" %
                                      (auth_keys, pub_key)),
-                              private_key=data.key_path)
+                              private_key=data.key_path, max_attempts=3)
 
         if result.failed:
           raise errors.RemoteError("Unable to add our public key to %s in %s."
@@ -217,17 +232,36 @@
   def _RunCmd(self, hostname, command, user="root", use_cluster_key=False,
               strict_host_check=False, private_key=None, batch=True,
-              ask_key=False):
+              ask_key=False, max_attempts=1):
     """Wrapping SshRunner.Run with default parameters.
 
     For explanation of parameters see L{ganeti.ssh.SshRunner.Run}.
 
     """
-    return self.ssh_runner.Run(hostname=hostname, command=command, user=user,
-                               use_cluster_key=use_cluster_key,
-                               strict_host_check=strict_host_check,
-                               private_key=private_key, batch=batch,
-                               ask_key=ask_key)
+    for _ in range(max_attempts):
+      result = self.ssh_runner.Run(hostname=hostname, command=command,
+                                   user=user, use_cluster_key=use_cluster_key,
+                                   strict_host_check=strict_host_check,
+                                   private_key=private_key, batch=batch,
+                                   ask_key=ask_key)
+      if not result.failed:
+        break
+
+    return result
+
+  def _CheckRunningInstances(self):
+    """Checks if on the clusters to be merged there are running instances
+
+    @rtype: boolean
+    @return: True if there are running instances, False otherwise
+
+    """
+    for cluster in self.clusters:
+      result = self._RunCmd(cluster, "gnt-instance list -o status")
+      if self.RUNNING_STATUSES.intersection(result.output.splitlines()):
+        return True
+
+    return False
 
   def _StopMergingInstances(self):
     """Stop instances on merging clusters.
 
@@ -262,7 +296,7 @@ class Merger(object):
     cmd = "%s stop-all" % constants.DAEMON_UTIL
     for data in self.merger_data:
       for node in data.nodes:
-        result = self._RunCmd(node, cmd)
+        result = self._RunCmd(node, cmd, max_attempts=3)
 
         if result.failed:
           raise errors.RemoteError("Unable to stop daemons on %s."
@@ -316,6 +350,10 @@ class Merger(object):
 
       for node in other_config.GetNodeList():
         node_info = other_config.GetNodeInfo(node)
+        # Offline the node, it will be reonlined later at node readd
+        node_info.master_candidate = False
+        node_info.drained = False
+        node_info.offline = True
         my_config.AddNode(node_info, _CLUSTERMERGE_ECID + str(fake_ec_id))
         fake_ec_id += 1
 
@@ -585,9 +623,8 @@ class Merger(object):
       result = utils.RunCmd(["gnt-node", "add", "--readd",
                              "--no-ssh-key-check", "--force-join", node])
       if result.failed:
-        raise errors.CommandError("Couldn't readd node %s. Fail reason: %s;"
-                                  " output: %s" % (node, result.fail_reason,
-                                                   result.output))
+        logging.error("%s failed to be readded. Reason: %s, output: %s",
+                      node, result.fail_reason, result.output)
 
     result = utils.RunCmd(["gnt-cluster", "redist-conf"])
     if result.failed:
@@ -610,6 +647,7 @@ class Merger(object):
                                 (result.fail_reason, result.output))
 
   # R0201: Method could be a function
+  # TODO: make this overridable, for some verify errors
   def _VerifyCluster(self): # pylint: disable-msg=R0201
     """Runs gnt-cluster verify to verify the health.
 
@@ -641,9 +679,14 @@ class Merger(object):
     rbsteps.append("Start all instances again on the merging"
                    " clusters: %(clusters)s")
 
-    logging.info("Stopping merging instances (takes a while)")
-    self._StopMergingInstances()
-
+    if self.stop_instances:
+      logging.info("Stopping merging instances (takes a while)")
+      self._StopMergingInstances()
+    logging.info("Checking that no instances are running on the mergees")
+    instances_running = self._CheckRunningInstances()
+    if instances_running:
+      raise errors.CommandError("Some instances are still running on the"
+                                " mergees")
     logging.info("Disable watcher")
     self._DisableWatcher()
     logging.info("Stop daemons on merging nodes")
@@ -742,6 +785,7 @@ def main():
   parser.add_option(GROUPS_OPT)
   parser.add_option(RESTART_OPT)
   parser.add_option(PARAMS_OPT)
+  parser.add_option(SKIP_STOP_INSTANCES_OPT)
 
   (options, args) = parser.parse_args()
 
@@ -751,7 +795,8 @@ def main():
     parser.error("No clusters specified")
 
   cluster_merger = Merger(utils.UniqueSequence(args), options.pause_period,
-                          options.groups, options.restart, options.params)
+                          options.groups, options.restart, options.params,
+                          options.stop_instances)
   try:
     try:
       cluster_merger.Setup()
diff --git a/tools/ganeti-listrunner b/tools/ganeti-listrunner
index b9371a939ae0665a74ab538a3941fec2889f0624..2ab63cef23637fcf4addce17f18fa19fec143780 100755
--- a/tools/ganeti-listrunner
+++ b/tools/ganeti-listrunner
@@ -323,7 +323,14 @@ def RunRemoteCommand(connection, command, logfile):
   ### Read when data is available
   output = ""
   while select.select([session], [], []):
-    data = session.recv(1024)
+    try:
+      data = session.recv(1024)
+    except socket.timeout, err:
+      data = None
+      WriteLog("FAILED: socket.timeout %s" % err, logfile)
+    except socket.error, err:
+      data = None
+      WriteLog("FAILED: socket.error %s" % err, logfile)
     if not data:
       break
     output += data
@@ -331,7 +338,7 @@ def RunRemoteCommand(connection, command, logfile):
 
   WriteLog("SUCCESS: command output follows", logfile)
   for line in output.split("\n"):
-    WriteLog("output = %s" %line, logfile)
+    WriteLog("output = %s" % line, logfile)
   WriteLog("command execution completed", logfile)
   session.close()
diff --git a/tools/kvm-console-wrapper b/tools/kvm-console-wrapper
index b0acd084e7b645ddf7f4f5bef1324ec64d28a8fb..7e65566ba98419a258387f05385e9a6bb68c497c 100755
--- a/tools/kvm-console-wrapper
+++ b/tools/kvm-console-wrapper
@@ -1,4 +1,5 @@
 #!/bin/bash
+#
 # Copyright (C) 2011 Google Inc.
 #
diff --git a/tools/kvm-ifup.in b/tools/kvm-ifup.in
index 370040c83324949d9c964479b2f913d02a37a96b..4ba17e7f5bd20af1bebf3dff4cac4aa5175af9c1 100644
--- a/tools/kvm-ifup.in
+++ b/tools/kvm-ifup.in
@@ -1,4 +1,5 @@
 #!/bin/sh
+#
 # Copyright (C) 2011 Google Inc.
 #
diff --git a/tools/lvmstrap b/tools/lvmstrap
index 67c9cc32ac14175ed39772c3e2927780c4f3958b..e24b2a6daeacc5a268ec002aa0f278115e2806a4 100755
--- a/tools/lvmstrap
+++ b/tools/lvmstrap
@@ -85,7 +85,8 @@ PART_RE = re.compile("^((?:h|s|m|ub)d[a-z]{1,2})[0-9]+$")
 #: Minimum partition size to be considered (1 GB)
 PART_MINSIZE = 1024 * 1024 * 1024
 
-MBR_MAX_SIZE = 2 * 10**12
+MBR_MAX_SIZE = 2 * (10 ** 12)
+
 
 class Error(Exception):
   """Generic exception"""
 
@@ -192,7 +193,6 @@ def ParseOptions():
                     action="store_true", default=False,
                     help="use sfdisk instead of parted")
 
-
   options, args = parser.parse_args()
   if len(args) != 1:
     Usage()
@@ -232,7 +232,7 @@ def SysfsName(disk):
   match = PART_RE.match(disk)
   if match:
     # this is a partition, which resides in /sys/block under a different name
-    disk = "%s/%s" % (match.group(1),  disk)
+    disk = "%s/%s" % (match.group(1), disk)
   return "/sys/block/%s" % disk
 
@@ -776,7 +776,7 @@ def PartitionDisk(name, use_sfdisk):
     result = ExecCommand("parted -s /dev/%s mklabel %s" % (name, label_type))
   if result.failed:
     raise OperationalError("Critical: failed to create %s label on %s" %
-                           (label_type,name))
+                           (label_type, name))
   result = ExecCommand("parted -s /dev/%s mkpart pri ext2 1 100%%" % name)
   if result.failed:
     raise OperationalError("Critical: failed to create partition on %s" %
@@ -892,7 +892,6 @@ def BootStrap():
                  " LV count: %s, size: %s, free: %s." %
                  (vgname, lv_count, vg_size, vg_free))
 
-
   disklist = ValidateDiskList(options)
 
   for disk in disklist:
diff --git a/tools/xm-console-wrapper b/tools/xm-console-wrapper
index 540a62d42cbc3905aee3c56f59e069f033b80556..d6fd556637f4fa324009fd5349bc7885c411b146 100755
--- a/tools/xm-console-wrapper
+++ b/tools/xm-console-wrapper
@@ -1,4 +1,5 @@
 #!/bin/bash
+#
 # Copyright (C) 2011 Google Inc.
 #