diff --git a/lib/backend.py b/lib/backend.py index 82408e2fbe6d1a411d14414c2f4f782effbb38f7..d8b44e2fd94191eb35f5ba70b9ae808af5f41631 100644 --- a/lib/backend.py +++ b/lib/backend.py @@ -421,7 +421,7 @@ def LeaveCluster(modify_ssh_setup): result.cmd, result.exit_code, result.output) # Raise a custom exception (handled in ganeti-noded) - raise errors.QuitGanetiException(True, 'Shutdown scheduled') + raise errors.QuitGanetiException(True, "Shutdown scheduled") def GetNodeInfo(vgname, hypervisor_type): @@ -449,8 +449,8 @@ def GetNodeInfo(vgname, hypervisor_type): if vginfo: vg_free = int(round(vginfo[0][0], 0)) vg_size = int(round(vginfo[0][1], 0)) - outputarray['vg_size'] = vg_size - outputarray['vg_free'] = vg_free + outputarray["vg_size"] = vg_size + outputarray["vg_free"] = vg_free if hypervisor_type is not None: hyper = hypervisor.GetHypervisor(hypervisor_type) @@ -707,7 +707,7 @@ def GetVolumeList(vg_names): """ lvs = {} - sep = '|' + sep = "|" if not vg_names: vg_names = [] result = utils.RunCmd(["lvs", "--noheadings", "--units=m", "--nosuffix", @@ -723,9 +723,9 @@ def GetVolumeList(vg_names): logging.error("Invalid line returned from lvs output: '%s'", line) continue vg_name, name, size, attr = match.groups() - inactive = attr[4] == '-' - online = attr[5] == 'o' - virtual = attr[0] == 'v' + inactive = attr[4] == "-" + online = attr[5] == "o" + virtual = attr[0] == "v" if virtual: # we don't want to report such volumes as existing, since they # don't really hold data @@ -773,20 +773,20 @@ def NodeVolumes(): result.output) def parse_dev(dev): - return dev.split('(')[0] + return dev.split("(")[0] def handle_dev(dev): return [parse_dev(x) for x in dev.split(",")] def map_line(line): line = [v.strip() for v in line] - return [{'name': line[0], 'size': line[1], - 'dev': dev, 'vg': line[3]} for dev in handle_dev(line[2])] + return [{"name": line[0], "size": line[1], + "dev": dev, "vg": line[3]} for dev in handle_dev(line[2])] all_devs = [] for line in result.stdout.splitlines(): - if line.count('|') >= 3: - all_devs.extend(map_line(line.split('|'))) + if line.count("|") >= 3: + all_devs.extend(map_line(line.split("|"))) else: logging.warning("Strange line in the output from lvs: '%s'", line) return all_devs @@ -851,9 +851,9 @@ def GetInstanceInfo(instance, hname): iinfo = hypervisor.GetHypervisor(hname).GetInstanceInfo(instance) if iinfo is not None: - output['memory'] = iinfo[2] - output['state'] = iinfo[4] - output['time'] = iinfo[5] + output["memory"] = iinfo[2] + output["state"] = iinfo[4] + output["time"] = iinfo[5] return output @@ -907,16 +907,16 @@ def GetAllInstancesInfo(hypervisor_list): if iinfo: for name, _, memory, vcpus, state, times in iinfo: value = { - 'memory': memory, - 'vcpus': vcpus, - 'state': state, - 'time': times, + "memory": memory, + "vcpus": vcpus, + "state": state, + "time": times, } if name in output: # we only check static parameters, like memory and vcpus, # and not state and time which can change between the # invocations of the different hypervisors - for key in 'memory', 'vcpus': + for key in "memory", "vcpus": if value[key] != output[name][key]: _Fail("Instance %s is running twice" " with different parameters", name) @@ -961,7 +961,7 @@ def InstanceOsAdd(instance, reinstall, debug): create_env = OSEnvironment(instance, inst_os, debug) if reinstall: - create_env['INSTANCE_REINSTALL'] = "1" + create_env["INSTANCE_REINSTALL"] = "1" logfile = _InstanceLogName("add", instance.os, instance.name) @@ -993,7 +993,7 @@ def RunRenameInstance(instance, old_name, 
debug): inst_os = OSFromDisk(instance.os) rename_env = OSEnvironment(instance, inst_os, debug) - rename_env['OLD_INSTANCE_NAME'] = old_name + rename_env["OLD_INSTANCE_NAME"] = old_name logfile = _InstanceLogName("rename", instance.os, "%s-%s" % (old_name, instance.name)) @@ -1331,7 +1331,7 @@ def BlockdevCreate(disk, size, owner, on_primary, info): it's not required to return anything. """ - # TODO: remove the obsolete 'size' argument + # TODO: remove the obsolete "size" argument # pylint: disable-msg=W0613 clist = [] if disk.children: @@ -1831,7 +1831,7 @@ def BlockdevExport(disk, dest_node, dest_path, cluster_name): destcmd) # all commands have been checked, so we're safe to combine them - command = '|'.join([expcmd, utils.ShellQuoteArgs(remotecmd)]) + command = "|".join([expcmd, utils.ShellQuoteArgs(remotecmd)]) result = utils.RunCmd(["bash", "-c", command]) @@ -1925,7 +1925,7 @@ def _ErrnoOrStr(err): @param err: the exception to format """ - if hasattr(err, 'errno'): + if hasattr(err, "errno"): detail = errno.errorcode[err.errno] else: detail = str(err) @@ -2055,10 +2055,10 @@ def _TryOSFromDisk(name, base_dir=None): os_files = dict.fromkeys(constants.OS_SCRIPTS) if max(api_versions) >= constants.OS_API_V15: - os_files[constants.OS_VARIANTS_FILE] = '' + os_files[constants.OS_VARIANTS_FILE] = "" if max(api_versions) >= constants.OS_API_V20: - os_files[constants.OS_PARAMETERS_FILE] = '' + os_files[constants.OS_PARAMETERS_FILE] = "" else: del os_files[constants.OS_SCRIPT_VERIFY] @@ -2161,20 +2161,20 @@ def OSCoreEnv(os_name, inst_os, os_params, debug=0): result = {} api_version = \ max(constants.OS_API_VERSIONS.intersection(inst_os.api_versions)) - result['OS_API_VERSION'] = '%d' % api_version - result['OS_NAME'] = inst_os.name - result['DEBUG_LEVEL'] = '%d' % debug + result["OS_API_VERSION"] = "%d" % api_version + result["OS_NAME"] = inst_os.name + result["DEBUG_LEVEL"] = "%d" % debug # OS variants if api_version >= constants.OS_API_V15: variant = objects.OS.GetVariant(os_name) if not variant: variant = inst_os.supported_variants[0] - result['OS_VARIANT'] = variant + result["OS_VARIANT"] = variant # OS params for pname, pvalue in os_params.items(): - result['OSP_%s' % pname.upper()] = pvalue + result["OSP_%s" % pname.upper()] = pvalue return result @@ -2199,38 +2199,38 @@ def OSEnvironment(instance, inst_os, debug=0): for attr in ["name", "os", "uuid", "ctime", "mtime", "primary_node"]: result["INSTANCE_%s" % attr.upper()] = str(getattr(instance, attr)) - result['HYPERVISOR'] = instance.hypervisor - result['DISK_COUNT'] = '%d' % len(instance.disks) - result['NIC_COUNT'] = '%d' % len(instance.nics) - result['INSTANCE_SECONDARY_NODES'] = \ - ('%s' % " ".join(instance.secondary_nodes)) + result["HYPERVISOR"] = instance.hypervisor + result["DISK_COUNT"] = "%d" % len(instance.disks) + result["NIC_COUNT"] = "%d" % len(instance.nics) + result["INSTANCE_SECONDARY_NODES"] = \ + ("%s" % " ".join(instance.secondary_nodes)) # Disks for idx, disk in enumerate(instance.disks): real_disk = _OpenRealBD(disk) - result['DISK_%d_PATH' % idx] = real_disk.dev_path - result['DISK_%d_ACCESS' % idx] = disk.mode + result["DISK_%d_PATH" % idx] = real_disk.dev_path + result["DISK_%d_ACCESS" % idx] = disk.mode if constants.HV_DISK_TYPE in instance.hvparams: - result['DISK_%d_FRONTEND_TYPE' % idx] = \ + result["DISK_%d_FRONTEND_TYPE" % idx] = \ instance.hvparams[constants.HV_DISK_TYPE] if disk.dev_type in constants.LDS_BLOCK: - result['DISK_%d_BACKEND_TYPE' % idx] = 'block' + result["DISK_%d_BACKEND_TYPE" % idx] = 
"block" elif disk.dev_type == constants.LD_FILE: - result['DISK_%d_BACKEND_TYPE' % idx] = \ - 'file:%s' % disk.physical_id[0] + result["DISK_%d_BACKEND_TYPE" % idx] = \ + "file:%s" % disk.physical_id[0] # NICs for idx, nic in enumerate(instance.nics): - result['NIC_%d_MAC' % idx] = nic.mac + result["NIC_%d_MAC" % idx] = nic.mac if nic.ip: - result['NIC_%d_IP' % idx] = nic.ip - result['NIC_%d_MODE' % idx] = nic.nicparams[constants.NIC_MODE] + result["NIC_%d_IP" % idx] = nic.ip + result["NIC_%d_MODE" % idx] = nic.nicparams[constants.NIC_MODE] if nic.nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED: - result['NIC_%d_BRIDGE' % idx] = nic.nicparams[constants.NIC_LINK] + result["NIC_%d_BRIDGE" % idx] = nic.nicparams[constants.NIC_LINK] if nic.nicparams[constants.NIC_LINK]: - result['NIC_%d_LINK' % idx] = nic.nicparams[constants.NIC_LINK] + result["NIC_%d_LINK" % idx] = nic.nicparams[constants.NIC_LINK] if constants.HV_NIC_TYPE in instance.hvparams: - result['NIC_%d_FRONTEND_TYPE' % idx] = \ + result["NIC_%d_FRONTEND_TYPE" % idx] = \ instance.hvparams[constants.HV_NIC_TYPE] # HV/BE params @@ -2318,46 +2318,46 @@ def FinalizeExport(instance, snap_disks): config = objects.SerializableConfigParser() config.add_section(constants.INISECT_EXP) - config.set(constants.INISECT_EXP, 'version', '0') - config.set(constants.INISECT_EXP, 'timestamp', '%d' % int(time.time())) - config.set(constants.INISECT_EXP, 'source', instance.primary_node) - config.set(constants.INISECT_EXP, 'os', instance.os) + config.set(constants.INISECT_EXP, "version", "0") + config.set(constants.INISECT_EXP, "timestamp", "%d" % int(time.time())) + config.set(constants.INISECT_EXP, "source", instance.primary_node) + config.set(constants.INISECT_EXP, "os", instance.os) config.set(constants.INISECT_EXP, "compression", "none") config.add_section(constants.INISECT_INS) - config.set(constants.INISECT_INS, 'name', instance.name) - config.set(constants.INISECT_INS, 'memory', '%d' % + config.set(constants.INISECT_INS, "name", instance.name) + config.set(constants.INISECT_INS, "memory", "%d" % instance.beparams[constants.BE_MEMORY]) - config.set(constants.INISECT_INS, 'vcpus', '%d' % + config.set(constants.INISECT_INS, "vcpus", "%d" % instance.beparams[constants.BE_VCPUS]) - config.set(constants.INISECT_INS, 'disk_template', instance.disk_template) - config.set(constants.INISECT_INS, 'hypervisor', instance.hypervisor) + config.set(constants.INISECT_INS, "disk_template", instance.disk_template) + config.set(constants.INISECT_INS, "hypervisor", instance.hypervisor) config.set(constants.INISECT_INS, "tags", " ".join(instance.GetTags())) nic_total = 0 for nic_count, nic in enumerate(instance.nics): nic_total += 1 - config.set(constants.INISECT_INS, 'nic%d_mac' % - nic_count, '%s' % nic.mac) - config.set(constants.INISECT_INS, 'nic%d_ip' % nic_count, '%s' % nic.ip) + config.set(constants.INISECT_INS, "nic%d_mac" % + nic_count, "%s" % nic.mac) + config.set(constants.INISECT_INS, "nic%d_ip" % nic_count, "%s" % nic.ip) for param in constants.NICS_PARAMETER_TYPES: - config.set(constants.INISECT_INS, 'nic%d_%s' % (nic_count, param), - '%s' % nic.nicparams.get(param, None)) + config.set(constants.INISECT_INS, "nic%d_%s" % (nic_count, param), + "%s" % nic.nicparams.get(param, None)) # TODO: redundant: on load can read nics until it doesn't exist - config.set(constants.INISECT_INS, 'nic_count' , '%d' % nic_total) + config.set(constants.INISECT_INS, "nic_count" , "%d" % nic_total) disk_total = 0 for disk_count, disk in enumerate(snap_disks): if 
disk: disk_total += 1 - config.set(constants.INISECT_INS, 'disk%d_ivname' % disk_count, - ('%s' % disk.iv_name)) - config.set(constants.INISECT_INS, 'disk%d_dump' % disk_count, - ('%s' % disk.physical_id[1])) - config.set(constants.INISECT_INS, 'disk%d_size' % disk_count, - ('%d' % disk.size)) - - config.set(constants.INISECT_INS, 'disk_count' , '%d' % disk_total) + config.set(constants.INISECT_INS, "disk%d_ivname" % disk_count, + ("%s" % disk.iv_name)) + config.set(constants.INISECT_INS, "disk%d_dump" % disk_count, + ("%s" % disk.physical_id[1])) + config.set(constants.INISECT_INS, "disk%d_size" % disk_count, + ("%d" % disk.size)) + + config.set(constants.INISECT_INS, "disk_count" , "%d" % disk_total) # New-style hypervisor/backend parameters diff --git a/lib/bdev.py b/lib/bdev.py index 10dba3299893954a9880b08a5d247f27a8eb6f9a..70470f006bb102a68cffa09750dc652aa65674ee 100644 --- a/lib/bdev.py +++ b/lib/bdev.py @@ -600,7 +600,7 @@ class LogicalVolume(BlockDev): # one line for any non-empty string logging.error("Can't parse LVS output, no lines? Got '%s'", str(out)) return False - out = out[-1].strip().rstrip(',') + out = out[-1].strip().rstrip(",") out = out.split(",") if len(out) != 5: logging.error("Can't parse LVS output, len(%s) != 5", str(out)) @@ -633,7 +633,7 @@ class LogicalVolume(BlockDev): self.minor = minor self.pe_size = pe_size self.stripe_count = stripes - self._degraded = status[0] == 'v' # virtual volume, i.e. doesn't backing + self._degraded = status[0] == "v" # virtual volume, i.e. doesn't backing # storage self.attached = True return True @@ -745,8 +745,8 @@ class LogicalVolume(BlockDev): BlockDev.SetInfo(self, text) # Replace invalid characters - text = re.sub('^[^A-Za-z0-9_+.]', '_', text) - text = re.sub('[^-A-Za-z0-9_+.]', '_', text) + text = re.sub("^[^A-Za-z0-9_+.]", "_", text) + text = re.sub("[^-A-Za-z0-9_+.]", "_", text) # Only up to 128 characters are allowed text = text[:128] @@ -971,14 +971,14 @@ class BaseDRBD(BlockDev): # pylint: disable-msg=W0223 first_line) values = version.groups() - retval = {'k_major': int(values[0]), - 'k_minor': int(values[1]), - 'k_point': int(values[2]), - 'api': int(values[3]), - 'proto': int(values[4]), + retval = {"k_major": int(values[0]), + "k_minor": int(values[1]), + "k_point": int(values[2]), + "api": int(values[3]), + "proto": int(values[4]), } if values[5] is not None: - retval['proto2'] = values[5] + retval["proto2"] = values[5] return retval @@ -1113,10 +1113,10 @@ class DRBD8(BaseDRBD): super(DRBD8, self).__init__(unique_id, children, size) self.major = self._DRBD_MAJOR version = self._GetVersion(self._GetProcData()) - if version['k_major'] != 8 : + if version["k_major"] != 8 : _ThrowError("Mismatch in DRBD kernel version and requested ganeti" " usage: kernel is %s.%s, ganeti wants 8.x", - version['k_major'], version['k_minor']) + version["k_major"], version["k_minor"]) if (self._lhost is not None and self._lhost == self._rhost and self._lport == self._rport): @@ -1210,7 +1210,7 @@ class DRBD8(BaseDRBD): pyp.Optional(pyp.restOfLine).suppress()) # an entire section - section_name = pyp.Word(pyp.alphas + '_') + section_name = pyp.Word(pyp.alphas + "_") section = section_name + lbrace + pyp.ZeroOrMore(pyp.Group(stmt)) + rbrace bnf = pyp.ZeroOrMore(pyp.Group(section ^ stmt)) @@ -1343,18 +1343,18 @@ class DRBD8(BaseDRBD): # what we aim here is to revert back to the 'drain' method of # disk flushes and to disable metadata barriers, in effect going # back to pre-8.0.7 behaviour - vmaj = version['k_major'] - vmin = 
version['k_minor'] - vrel = version['k_point'] + vmaj = version["k_major"] + vmin = version["k_minor"] + vrel = version["k_point"] assert vmaj == 8 if vmin == 0: # 8.0.x if vrel >= 12: - args.extend(['-i', '-m']) + args.extend(["-i", "-m"]) elif vmin == 2: # 8.2.x if vrel >= 7: - args.extend(['-i', '-m']) + args.extend(["-i", "-m"]) elif vmaj >= 3: # 8.3.x or newer - args.extend(['-i', '-a', 'm']) + args.extend(["-i", "-a", "m"]) result = utils.RunCmd(args) if result.failed: _ThrowError("drbd%d: can't attach local disk: %s", minor, result.output) @@ -2102,7 +2102,7 @@ class PersistentBlockDevice(BlockDev): if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2: raise ValueError("Invalid configuration data %s" % str(unique_id)) self.dev_path = unique_id[1] - if not os.path.realpath(self.dev_path).startswith('/dev/'): + if not os.path.realpath(self.dev_path).startswith("/dev/"): raise ValueError("Full path '%s' lies outside /dev" % os.path.realpath(self.dev_path)) # TODO: this is just a safety guard checking that we only deal with devices diff --git a/lib/cli.py b/lib/cli.py index a066cd63dcc3de5823efafa0a20b883f9d3eb7f5..0fa91606a520dbd81559b9f6b3fe9ae2137c6a2b 100644 --- a/lib/cli.py +++ b/lib/cli.py @@ -517,7 +517,7 @@ def check_ident_key_val(option, opt, value): # pylint: disable-msg=W0613 """ if ":" not in value: - ident, rest = value, '' + ident, rest = value, "" else: ident, rest = value.split(":", 1) @@ -621,7 +621,7 @@ SEP_OPT = cli_option("--separator", default=None, " (defaults to one space)")) USEUNITS_OPT = cli_option("--units", default=None, - dest="units", choices=('h', 'm', 'g', 't'), + dest="units", choices=("h", "m", "g", "t"), help="Specify units for output (one of h/m/g/t)") FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store", @@ -1417,8 +1417,8 @@ def SplitNodeOption(value): """Splits the value of a --node option. 
""" - if value and ':' in value: - return value.split(':', 1) + if value and ":" in value: + return value.split(":", 1) else: return (value, None) @@ -1435,7 +1435,7 @@ def CalculateOSNames(os_name, os_variants): """ if os_variants: - return ['%s+%s' % (os_name, v) for v in os_variants] + return ["%s+%s" % (os_name, v) for v in os_variants] else: return [os_name] @@ -1477,12 +1477,12 @@ def AskUser(text, choices=None): """ if choices is None: - choices = [('y', True, 'Perform the operation'), - ('n', False, 'Do not perform the operation')] + choices = [("y", True, "Perform the operation"), + ("n", False, "Do not perform the operation")] if not choices or not isinstance(choices, list): raise errors.ProgrammerError("Invalid choices argument to AskUser") for entry in choices: - if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?': + if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?": raise errors.ProgrammerError("Invalid choices element to AskUser") answer = choices[-1][1] @@ -1497,18 +1497,18 @@ def AskUser(text, choices=None): try: chars = [entry[0] for entry in choices] chars[-1] = "[%s]" % chars[-1] - chars.append('?') + chars.append("?") maps = dict([(entry[0], entry[1]) for entry in choices]) while True: f.write(text) - f.write('\n') + f.write("\n") f.write("/".join(chars)) f.write(": ") line = f.readline(2).strip().lower() if line in maps: answer = maps[line] break - elif line == '?': + elif line == "?": for entry in choices: f.write(" %s - %s\n" % (entry[0], entry[2])) f.write("\n") @@ -1965,7 +1965,7 @@ def FormatError(err): retcode = 0 else: obuf.write("Unhandled exception: %s" % msg) - return retcode, obuf.getvalue().rstrip('\n') + return retcode, obuf.getvalue().rstrip("\n") def GenericMain(commands, override=None, aliases=None): @@ -2381,7 +2381,7 @@ def GenerateTable(headers, fields, separator, data, if separator is None: mlens = [0 for name in fields] - format_str = ' '.join(format_fields) + format_str = " ".join(format_fields) else: format_str = separator.replace("%", "%%").join(format_fields) @@ -2420,7 +2420,7 @@ def GenerateTable(headers, fields, separator, data, for line in data: args = [] if line is None: - line = ['-' for _ in fields] + line = ["-" for _ in fields] for idx in range(len(fields)): if separator is None: args.append(mlens[idx]) @@ -2826,7 +2826,7 @@ def FormatTimestamp(ts): """ if not isinstance (ts, (tuple, list)) or len(ts) != 2: - return '?' + return "?" 
sec, usec = ts return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec @@ -2849,11 +2849,11 @@ def ParseTimespec(value): if not value: raise errors.OpPrereqError("Empty time specification passed") suffix_map = { - 's': 1, - 'm': 60, - 'h': 3600, - 'd': 86400, - 'w': 604800, + "s": 1, + "m": 60, + "h": 3600, + "d": 86400, + "w": 604800, } if value[-1] not in suffix_map: try: @@ -2966,7 +2966,7 @@ def _ToStream(stream, txt, *args): stream.write(txt % args) else: stream.write(txt) - stream.write('\n') + stream.write("\n") stream.flush() except IOError, err: if err.errno == errno.EPIPE: diff --git a/lib/client/gnt_cluster.py b/lib/client/gnt_cluster.py index fe6831da26a5169b015eef571ebad69e6d45583c..b74b528d45e7cfb9625971ba6f33cbaa80554af2 100644 --- a/lib/client/gnt_cluster.py +++ b/lib/client/gnt_cluster.py @@ -1242,7 +1242,7 @@ def Epo(opts, args): commands = { - 'init': ( + "init": ( InitCluster, [ArgHost(min=1, max=1)], [BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, GLOBAL_FILEDIR_OPT, HVLIST_OPT, MAC_PREFIX_OPT, MASTER_NETDEV_OPT, NIC_PARAMS_OPT, @@ -1252,75 +1252,75 @@ commands = { DEFAULT_IALLOCATOR_OPT, PRIMARY_IP_VERSION_OPT, PREALLOC_WIPE_DISKS_OPT, NODE_PARAMS_OPT, GLOBAL_SHARED_FILEDIR_OPT], "[opts...] <cluster_name>", "Initialises a new cluster configuration"), - 'destroy': ( + "destroy": ( DestroyCluster, ARGS_NONE, [YES_DOIT_OPT], "", "Destroy cluster"), - 'rename': ( + "rename": ( RenameCluster, [ArgHost(min=1, max=1)], [FORCE_OPT, DRY_RUN_OPT], "<new_name>", "Renames the cluster"), - 'redist-conf': ( + "redist-conf": ( RedistributeConfig, ARGS_NONE, [SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT], "", "Forces a push of the configuration file and ssconf files" " to the nodes in the cluster"), - 'verify': ( + "verify": ( VerifyCluster, ARGS_NONE, [VERBOSE_OPT, DEBUG_SIMERR_OPT, ERROR_CODES_OPT, NONPLUS1_OPT, DRY_RUN_OPT, PRIORITY_OPT, NODEGROUP_OPT], "", "Does a check on the cluster configuration"), - 'verify-disks': ( + "verify-disks": ( VerifyDisks, ARGS_NONE, [PRIORITY_OPT], "", "Does a check on the cluster disk status"), - 'repair-disk-sizes': ( + "repair-disk-sizes": ( RepairDiskSizes, ARGS_MANY_INSTANCES, [DRY_RUN_OPT, PRIORITY_OPT], "", "Updates mismatches in recorded disk sizes"), - 'master-failover': ( + "master-failover": ( MasterFailover, ARGS_NONE, [NOVOTING_OPT], "", "Makes the current node the master"), - 'master-ping': ( + "master-ping": ( MasterPing, ARGS_NONE, [], "", "Checks if the master is alive"), - 'version': ( + "version": ( ShowClusterVersion, ARGS_NONE, [], "", "Shows the cluster version"), - 'getmaster': ( + "getmaster": ( ShowClusterMaster, ARGS_NONE, [], "", "Shows the cluster master"), - 'copyfile': ( + "copyfile": ( ClusterCopyFile, [ArgFile(min=1, max=1)], [NODE_LIST_OPT, USE_REPL_NET_OPT, NODEGROUP_OPT], "[-n node...] <filename>", "Copies a file to all (or only some) nodes"), - 'command': ( + "command": ( RunClusterCommand, [ArgCommand(min=1)], [NODE_LIST_OPT, NODEGROUP_OPT], "[-n node...] 
<command>", "Runs a command on all (or only some) nodes"), - 'info': ( + "info": ( ShowClusterConfig, ARGS_NONE, [ROMAN_OPT], "[--roman]", "Show cluster configuration"), - 'list-tags': ( + "list-tags": ( ListTags, ARGS_NONE, [], "", "List the tags of the cluster"), - 'add-tags': ( + "add-tags": ( AddTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT], "tag...", "Add tags to the cluster"), - 'remove-tags': ( + "remove-tags": ( RemoveTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT], "tag...", "Remove tags from the cluster"), - 'search-tags': ( + "search-tags": ( SearchTags, [ArgUnknown(min=1, max=1)], [PRIORITY_OPT], "", "Searches the tags on all objects on" " the cluster for a given pattern (regex)"), - 'queue': ( + "queue": ( QueueOps, [ArgChoice(min=1, max=1, choices=["drain", "undrain", "info"])], [], "drain|undrain|info", "Change queue properties"), - 'watcher': ( + "watcher": ( WatcherOps, [ArgChoice(min=1, max=1, choices=["pause", "continue", "info"]), ArgSuggest(min=0, max=1, choices=["30m", "1h", "4h"])], [], "{pause <timespec>|continue|info}", "Change watcher properties"), - 'modify': ( + "modify": ( SetClusterParams, ARGS_NONE, [BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, HVLIST_OPT, MASTER_NETDEV_OPT, NIC_PARAMS_OPT, NOLVM_STORAGE_OPT, VG_NAME_OPT, MAINTAIN_NODE_HEALTH_OPT, @@ -1347,7 +1347,7 @@ commands = { #: dictionary with aliases for commands aliases = { - 'masterfailover': 'master-failover', + "masterfailover": "master-failover", } diff --git a/lib/client/gnt_debug.py b/lib/client/gnt_debug.py index d91f4bdf025865d459399acb176b64ee67842b08..54037766a035e5467f16300a53dae8e49310003f 100644 --- a/lib/client/gnt_debug.py +++ b/lib/client/gnt_debug.py @@ -143,7 +143,7 @@ def TestAllocator(opts, args): while len(row) < 3: row.append(None) for i in range(3): - if row[i] == '': + if row[i] == "": row[i] = None nic_dict = [{ constants.INIC_MAC: v[0], @@ -616,7 +616,7 @@ def ListLocks(opts, args): # pylint: disable-msg=W0613 commands = { - 'delay': ( + "delay": ( Delay, [ArgUnknown(min=1, max=1)], [cli_option("--no-master", dest="on_master", default=True, action="store_false", help="Do not sleep in the master code"), @@ -627,7 +627,7 @@ commands = { DRY_RUN_OPT, PRIORITY_OPT, ], "[opts...] 
<duration>", "Executes a TestDelay OpCode"), - 'submit-job': ( + "submit-job": ( GenericOpCodes, [ArgFile(min=1)], [VERBOSE_OPT, cli_option("--op-repeat", type="int", default="1", dest="rep_op", @@ -642,7 +642,7 @@ commands = { ], "<op_list_file...>", "Submits jobs built from json files" " containing a list of serialized opcodes"), - 'iallocator': ( + "iallocator": ( TestAllocator, [ArgUnknown(min=1)], [cli_option("--dir", dest="direction", default=constants.IALLOCATOR_DIR_IN, choices=list(constants.VALID_IALLOCATOR_DIRECTIONS), diff --git a/lib/client/gnt_instance.py b/lib/client/gnt_instance.py index 402629d09c4a8c5c71733a207e529471deaf187f..180c0268b0880ec733bbf748f85ebd71893c8c7b 100644 --- a/lib/client/gnt_instance.py +++ b/lib/client/gnt_instance.py @@ -287,7 +287,7 @@ def BatchCreate(opts, args): "hvparams": {}, "file_storage_dir": None, "force_variant": False, - "file_driver": 'loop'} + "file_driver": "loop"} def _PopulateWithDefaults(spec): """Returns a new hash combined with default values.""" @@ -298,25 +298,25 @@ def BatchCreate(opts, args): def _Validate(spec): """Validate the instance specs.""" # Validate fields required under any circumstances - for required_field in ('os', 'template'): + for required_field in ("os", "template"): if required_field not in spec: raise errors.OpPrereqError('Required field "%s" is missing.' % required_field, errors.ECODE_INVAL) # Validate special fields - if spec['primary_node'] is not None: - if (spec['template'] in constants.DTS_INT_MIRROR and - spec['secondary_node'] is None): - raise errors.OpPrereqError('Template requires secondary node, but' - ' there was no secondary provided.', + if spec["primary_node"] is not None: + if (spec["template"] in constants.DTS_INT_MIRROR and + spec["secondary_node"] is None): + raise errors.OpPrereqError("Template requires secondary node, but" + " there was no secondary provided.", errors.ECODE_INVAL) - elif spec['iallocator'] is None: - raise errors.OpPrereqError('You have to provide at least a primary_node' - ' or an iallocator.', + elif spec["iallocator"] is None: + raise errors.OpPrereqError("You have to provide at least a primary_node" + " or an iallocator.", errors.ECODE_INVAL) - if (spec['hvparams'] and - not isinstance(spec['hvparams'], dict)): - raise errors.OpPrereqError('Hypervisor parameters must be a dict.', + if (spec["hvparams"] and + not isinstance(spec["hvparams"], dict)): + raise errors.OpPrereqError("Hypervisor parameters must be a dict.", errors.ECODE_INVAL) json_filename = args[0] @@ -341,11 +341,11 @@ def BatchCreate(opts, args): specs = _PopulateWithDefaults(specs) _Validate(specs) - hypervisor = specs['hypervisor'] - hvparams = specs['hvparams'] + hypervisor = specs["hypervisor"] + hvparams = specs["hvparams"] disks = [] - for elem in specs['disk_size']: + for elem in specs["disk_size"]: try: size = utils.ParseUnit(elem) except (TypeError, ValueError), err: @@ -354,7 +354,7 @@ def BatchCreate(opts, args): (elem, name, err), errors.ECODE_INVAL) disks.append({"size": size}) - utils.ForceDictType(specs['backend'], constants.BES_PARAMETER_TYPES) + utils.ForceDictType(specs["backend"], constants.BES_PARAMETER_TYPES) utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES) tmp_nics = [] @@ -364,34 +364,34 @@ def BatchCreate(opts, args): tmp_nics.append({}) tmp_nics[0][field] = specs[field] - if specs['nics'] is not None and tmp_nics: + if specs["nics"] is not None and tmp_nics: raise errors.OpPrereqError("'nics' list incompatible with using" " individual nic fields as well", 
errors.ECODE_INVAL) - elif specs['nics'] is not None: - tmp_nics = specs['nics'] + elif specs["nics"] is not None: + tmp_nics = specs["nics"] elif not tmp_nics: tmp_nics = [{}] op = opcodes.OpInstanceCreate(instance_name=name, disks=disks, - disk_template=specs['template'], + disk_template=specs["template"], mode=constants.INSTANCE_CREATE, - os_type=specs['os'], + os_type=specs["os"], force_variant=specs["force_variant"], - pnode=specs['primary_node'], - snode=specs['secondary_node'], + pnode=specs["primary_node"], + snode=specs["secondary_node"], nics=tmp_nics, - start=specs['start'], - ip_check=specs['ip_check'], - name_check=specs['name_check'], + start=specs["start"], + ip_check=specs["ip_check"], + name_check=specs["name_check"], wait_for_sync=True, - iallocator=specs['iallocator'], + iallocator=specs["iallocator"], hypervisor=hypervisor, hvparams=hvparams, - beparams=specs['backend'], - file_storage_dir=specs['file_storage_dir'], - file_driver=specs['file_driver']) + beparams=specs["backend"], + file_storage_dir=specs["file_storage_dir"], + file_driver=specs["file_driver"]) jex.QueueJob(name, op) # we never want to wait, just show the submitted job IDs @@ -438,11 +438,11 @@ def ReinstallInstance(opts, args): choices.append(("%s" % number, entry, entry)) number += 1 - choices.append(('x', 'exit', 'Exit gnt-instance reinstall')) + choices.append(("x", "exit", "Exit gnt-instance reinstall")) selected = AskUser("Enter OS template number (or x to abort):", choices) - if selected == 'exit': + if selected == "exit": ToStderr("User aborted reinstall, exiting") return 1 @@ -1225,7 +1225,7 @@ def ShowInstanceConfig(opts, args): _FormatList(buf, _FormatBlockDevInfo(idx, True, device, opts.roman_integers), 2) - ToStdout(buf.getvalue().rstrip('\n')) + ToStdout(buf.getvalue().rstrip("\n")) return retcode @@ -1276,10 +1276,10 @@ def SetInstanceParams(opts, args): except (TypeError, ValueError): pass if disk_op == constants.DDM_ADD: - if 'size' not in disk_dict: + if "size" not in disk_dict: raise errors.OpPrereqError("Missing required parameter 'size'", errors.ECODE_INVAL) - disk_dict['size'] = utils.ParseUnit(disk_dict['size']) + disk_dict["size"] = utils.ParseUnit(disk_dict["size"]) if (opts.disk_template and opts.disk_template in constants.DTS_INT_MIRROR and @@ -1368,42 +1368,42 @@ add_opts = [ ] commands = { - 'add': ( + "add": ( AddInstance, [ArgHost(min=1, max=1)], COMMON_CREATE_OPTS + add_opts, "[...] 
-t disk-type -n node[:secondary-node] -o os-type <name>", "Creates and adds a new instance to the cluster"), - 'batch-create': ( + "batch-create": ( BatchCreate, [ArgFile(min=1, max=1)], [DRY_RUN_OPT, PRIORITY_OPT], "<instances.json>", "Create a bunch of instances based on specs in the file."), - 'console': ( + "console": ( ConnectToInstanceConsole, ARGS_ONE_INSTANCE, [SHOWCMD_OPT, PRIORITY_OPT], "[--show-cmd] <instance>", "Opens a console on the specified instance"), - 'failover': ( + "failover": ( FailoverInstance, ARGS_ONE_INSTANCE, [FORCE_OPT, IGNORE_CONSIST_OPT, SUBMIT_OPT, SHUTDOWN_TIMEOUT_OPT, DRY_RUN_OPT, PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT], "[-f] <instance>", "Stops the instance and starts it on the backup node," " using the remote mirror (only for mirrored instances)"), - 'migrate': ( + "migrate": ( MigrateInstance, ARGS_ONE_INSTANCE, [FORCE_OPT, NONLIVE_OPT, MIGRATION_MODE_OPT, CLEANUP_OPT, DRY_RUN_OPT, PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT, ALLOW_FAILOVER_OPT], "[-f] <instance>", "Migrate instance to its secondary node" " (only for mirrored instances)"), - 'move': ( + "move": ( MoveInstance, ARGS_ONE_INSTANCE, [FORCE_OPT, SUBMIT_OPT, SINGLE_NODE_OPT, SHUTDOWN_TIMEOUT_OPT, DRY_RUN_OPT, PRIORITY_OPT, IGNORE_CONSIST_OPT], "[-f] <instance>", "Move instance to an arbitrary node" " (only for instances of type file and lv)"), - 'info': ( + "info": ( ShowInstanceConfig, ARGS_MANY_INSTANCES, [STATIC_OPT, ALL_OPT, ROMAN_OPT, PRIORITY_OPT], "[-s] {--all | <instance>...}", "Show information on the specified instance(s)"), - 'list': ( + "list": ( ListInstances, ARGS_MANY_INSTANCES, [NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, VERBOSE_OPT, FORCE_FILTER_OPT], @@ -1418,44 +1418,44 @@ commands = { [NOHDR_OPT, SEP_OPT], "[fields...]", "Lists all available fields for instances"), - 'reinstall': ( + "reinstall": ( ReinstallInstance, [ArgInstance()], [FORCE_OPT, OS_OPT, FORCE_VARIANT_OPT, m_force_multi, m_node_opt, m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_inst_opt, m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt, m_inst_tags_opt, SELECT_OS_OPT, SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT, OSPARAMS_OPT], "[-f] <instance>", "Reinstall a stopped instance"), - 'remove': ( + "remove": ( RemoveInstance, ARGS_ONE_INSTANCE, [FORCE_OPT, SHUTDOWN_TIMEOUT_OPT, IGNORE_FAILURES_OPT, SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT], "[-f] <instance>", "Shuts down the instance and removes it"), - 'rename': ( + "rename": ( RenameInstance, [ArgInstance(min=1, max=1), ArgHost(min=1, max=1)], [NOIPCHECK_OPT, NONAMECHECK_OPT, SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT], "<instance> <new_name>", "Rename the instance"), - 'replace-disks': ( + "replace-disks": ( ReplaceDisks, ARGS_ONE_INSTANCE, [AUTO_REPLACE_OPT, DISKIDX_OPT, IALLOCATOR_OPT, EARLY_RELEASE_OPT, NEW_SECONDARY_OPT, ON_PRIMARY_OPT, ON_SECONDARY_OPT, SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT], "[-s|-p|-n NODE|-I NAME] <instance>", "Replaces all disks for the instance"), - 'modify': ( + "modify": ( SetInstanceParams, ARGS_ONE_INSTANCE, [BACKEND_OPT, DISK_OPT, FORCE_OPT, HVOPTS_OPT, NET_OPT, SUBMIT_OPT, DISK_TEMPLATE_OPT, SINGLE_NODE_OPT, OS_OPT, FORCE_VARIANT_OPT, OSPARAMS_OPT, DRY_RUN_OPT, PRIORITY_OPT, NWSYNC_OPT], "<instance>", "Alters the parameters of an instance"), - 'shutdown': ( + "shutdown": ( GenericManyOps("shutdown", _ShutdownInstance), [ArgInstance()], [m_node_opt, m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt, m_inst_tags_opt, m_inst_opt, m_force_multi, TIMEOUT_OPT, SUBMIT_OPT, 
DRY_RUN_OPT, PRIORITY_OPT, IGNORE_OFFLINE_OPT, NO_REMEMBER_OPT], "<instance>", "Stops an instance"), - 'startup': ( + "startup": ( GenericManyOps("startup", _StartupInstance), [ArgInstance()], [FORCE_OPT, m_force_multi, m_node_opt, m_pri_node_opt, m_sec_node_opt, m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt, @@ -1463,39 +1463,39 @@ commands = { BACKEND_OPT, DRY_RUN_OPT, PRIORITY_OPT, IGNORE_OFFLINE_OPT, NO_REMEMBER_OPT, STARTUP_PAUSED_OPT], "<instance>", "Starts an instance"), - 'reboot': ( + "reboot": ( GenericManyOps("reboot", _RebootInstance), [ArgInstance()], [m_force_multi, REBOOT_TYPE_OPT, IGNORE_SECONDARIES_OPT, m_node_opt, m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_inst_opt, SUBMIT_OPT, m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt, m_inst_tags_opt, SHUTDOWN_TIMEOUT_OPT, DRY_RUN_OPT, PRIORITY_OPT], "<instance>", "Reboots an instance"), - 'activate-disks': ( + "activate-disks": ( ActivateDisks, ARGS_ONE_INSTANCE, [SUBMIT_OPT, IGNORE_SIZE_OPT, PRIORITY_OPT], "<instance>", "Activate an instance's disks"), - 'deactivate-disks': ( + "deactivate-disks": ( DeactivateDisks, ARGS_ONE_INSTANCE, [FORCE_OPT, SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT], "[-f] <instance>", "Deactivate an instance's disks"), - 'recreate-disks': ( + "recreate-disks": ( RecreateDisks, ARGS_ONE_INSTANCE, [SUBMIT_OPT, DISKIDX_OPT, NODE_PLACEMENT_OPT, DRY_RUN_OPT, PRIORITY_OPT], "<instance>", "Recreate an instance's disks"), - 'grow-disk': ( + "grow-disk": ( GrowDisk, [ArgInstance(min=1, max=1), ArgUnknown(min=1, max=1), ArgUnknown(min=1, max=1)], [SUBMIT_OPT, NWSYNC_OPT, DRY_RUN_OPT, PRIORITY_OPT], "<instance> <disk> <size>", "Grow an instance's disk"), - 'list-tags': ( + "list-tags": ( ListTags, ARGS_ONE_INSTANCE, [PRIORITY_OPT], "<instance_name>", "List the tags of the given instance"), - 'add-tags': ( + "add-tags": ( AddTags, [ArgInstance(min=1, max=1), ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT], "<instance_name> tag...", "Add tags to the given instance"), - 'remove-tags': ( + "remove-tags": ( RemoveTags, [ArgInstance(min=1, max=1), ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT], "<instance_name> tag...", "Remove tags from given instance"), @@ -1503,8 +1503,8 @@ commands = { #: dictionary with aliases for commands aliases = { - 'start': 'startup', - 'stop': 'shutdown', + "start": "startup", + "stop": "shutdown", } diff --git a/lib/client/gnt_job.py b/lib/client/gnt_job.py index daa3732955922a137b9fd5a115a5b3f5c4666899..4e11eabb73cc69970a31317fea89256ec3f8a9a6 100644 --- a/lib/client/gnt_job.py +++ b/lib/client/gnt_job.py @@ -157,7 +157,7 @@ def AutoArchiveJobs(opts, args): age = args[0] - if age == 'all': + if age == "all": age = -1 else: age = ParseTimespec(age) @@ -357,7 +357,7 @@ def WatchJob(opts, args): commands = { - 'list': ( + "list": ( ListJobs, [ArgJobId()], [NOHDR_OPT, SEP_OPT, FIELDS_OPT], "[job_id ...]", @@ -366,22 +366,22 @@ commands = { " op_status, op_result." " The default field" " list is (in order): %s." 
% utils.CommaJoin(_LIST_DEF_FIELDS)), - 'archive': ( + "archive": ( ArchiveJobs, [ArgJobId(min=1)], [], "<job-id> [<job-id> ...]", "Archive specified jobs"), - 'autoarchive': ( + "autoarchive": ( AutoArchiveJobs, [ArgSuggest(min=1, max=1, choices=["1d", "1w", "4w", "all"])], [], "<age>", "Auto archive jobs older than the given age"), - 'cancel': ( + "cancel": ( CancelJobs, [ArgJobId(min=1)], [], "<job-id> [<job-id> ...]", "Cancel specified jobs"), - 'info': ( + "info": ( ShowJobs, [ArgJobId(min=1)], [], "<job-id> [<job-id> ...]", "Show detailed information about the specified jobs"), - 'watch': ( + "watch": ( WatchJob, [ArgJobId(min=1, max=1)], [], "<job-id>", "Follows a job and prints its output as it arrives"), } diff --git a/lib/client/gnt_node.py b/lib/client/gnt_node.py index 78a4e02fdbcfe0be6452d23698d7c5ce30bf76aa..1847b4162a43e566bc6a32615994b9758afbd429 100644 --- a/lib/client/gnt_node.py +++ b/lib/client/gnt_node.py @@ -174,7 +174,7 @@ def AddNode(opts, args): readd = opts.readd try: - output = cl.QueryNodes(names=[node], fields=['name', 'sip', 'master'], + output = cl.QueryNodes(names=[node], fields=["name", "sip", "master"], use_locking=False) node_exists, sip, is_master = output[0] except (errors.OpPrereqError, errors.OpExecError): @@ -197,7 +197,7 @@ def AddNode(opts, args): sip = opts.secondary_ip # read the cluster name from the master - output = cl.QueryConfigValues(['cluster_name']) + output = cl.QueryConfigValues(["cluster_name"]) cluster_name = output[0] if not readd and opts.node_setup: @@ -851,7 +851,7 @@ def SetNodeParams(opts, args): commands = { - 'add': ( + "add": ( AddNode, [ArgHost(min=1, max=1)], [SECONDARY_IP_OPT, READD_OPT, NOSSH_KEYCHECK_OPT, NODE_FORCE_JOIN_OPT, NONODE_SETUP_OPT, VERBOSE_OPT, NODEGROUP_OPT, PRIORITY_OPT, @@ -867,23 +867,23 @@ commands = { "[-f] {-I <iallocator> | -n <dst>} <node>", "Relocate the secondary instances from a node" " to other nodes"), - 'failover': ( + "failover": ( FailoverNode, ARGS_ONE_NODE, [FORCE_OPT, IGNORE_CONSIST_OPT, IALLOCATOR_OPT, PRIORITY_OPT], "[-f] <node>", "Stops the primary instances on a node and start them on their" " secondary node (only for instances with drbd disk template)"), - 'migrate': ( + "migrate": ( MigrateNode, ARGS_ONE_NODE, [FORCE_OPT, NONLIVE_OPT, MIGRATION_MODE_OPT, DST_NODE_OPT, IALLOCATOR_OPT, PRIORITY_OPT], "[-f] <node>", "Migrate all the primary instance on a node away from it" " (only for instances of type drbd)"), - 'info': ( + "info": ( ShowNodeConfig, ARGS_MANY_NODES, [], "[<node_name>...]", "Show information about the node(s)"), - 'list': ( + "list": ( ListNodes, ARGS_MANY_NODES, [NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, VERBOSE_OPT, FORCE_FILTER_OPT], @@ -897,18 +897,18 @@ commands = { [NOHDR_OPT, SEP_OPT], "[fields...]", "Lists all available fields for nodes"), - 'modify': ( + "modify": ( SetNodeParams, ARGS_ONE_NODE, [FORCE_OPT, SUBMIT_OPT, MC_OPT, DRAINED_OPT, OFFLINE_OPT, CAPAB_MASTER_OPT, CAPAB_VM_OPT, SECONDARY_IP_OPT, AUTO_PROMOTE_OPT, DRY_RUN_OPT, PRIORITY_OPT, NODE_PARAMS_OPT, NODE_POWERED_OPT], "<node_name>", "Alters the parameters of a node"), - 'powercycle': ( + "powercycle": ( PowercycleNode, ARGS_ONE_NODE, [FORCE_OPT, CONFIRM_OPT, DRY_RUN_OPT, PRIORITY_OPT], "<node_name>", "Tries to forcefully powercycle a node"), - 'power': ( + "power": ( PowerNode, [ArgChoice(min=1, max=1, choices=_LIST_POWER_COMMANDS), ArgNode()], @@ -916,28 +916,28 @@ commands = { FORCE_OPT, NOHDR_OPT, SEP_OPT, OOB_TIMEOUT_OPT, POWER_DELAY_OPT], "on|off|cycle|status [nodes...]", "Change power 
state of node by calling out-of-band helper."), - 'remove': ( + "remove": ( RemoveNode, ARGS_ONE_NODE, [DRY_RUN_OPT, PRIORITY_OPT], "<node_name>", "Removes a node from the cluster"), - 'volumes': ( + "volumes": ( ListVolumes, [ArgNode()], [NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, PRIORITY_OPT], "[<node_name>...]", "List logical volumes on node(s)"), - 'list-storage': ( + "list-storage": ( ListStorage, ARGS_MANY_NODES, [NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, _STORAGE_TYPE_OPT, PRIORITY_OPT], "[<node_name>...]", "List physical volumes on node(s). The available" " fields are (see the man page for details): %s." % (utils.CommaJoin(_LIST_STOR_HEADERS))), - 'modify-storage': ( + "modify-storage": ( ModifyStorage, [ArgNode(min=1, max=1), ArgChoice(min=1, max=1, choices=_MODIFIABLE_STORAGE_TYPES), ArgFile(min=1, max=1)], [ALLOCATABLE_OPT, DRY_RUN_OPT, PRIORITY_OPT], "<node_name> <storage_type> <name>", "Modify storage volume on a node"), - 'repair-storage': ( + "repair-storage": ( RepairStorage, [ArgNode(min=1, max=1), ArgChoice(min=1, max=1, choices=_REPAIRABLE_STORAGE_TYPES), @@ -945,13 +945,13 @@ commands = { [IGNORE_CONSIST_OPT, DRY_RUN_OPT, PRIORITY_OPT], "<node_name> <storage_type> <name>", "Repairs a storage volume on a node"), - 'list-tags': ( + "list-tags": ( ListTags, ARGS_ONE_NODE, [], "<node_name>", "List the tags of the given node"), - 'add-tags': ( + "add-tags": ( AddTags, [ArgNode(min=1, max=1), ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT], "<node_name> tag...", "Add tags to the given node"), - 'remove-tags': ( + "remove-tags": ( RemoveTags, [ArgNode(min=1, max=1), ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT], "<node_name> tag...", "Remove tags from the given node"), diff --git a/lib/client/gnt_os.py b/lib/client/gnt_os.py index fe786463c47ced240680aae9962ea865d336c3f7..e80aa6513a66d985e4c5893c3fb3c07046f0b15e 100644 --- a/lib/client/gnt_os.py +++ b/lib/client/gnt_os.py @@ -280,17 +280,17 @@ def ModifyOS(opts, args): commands = { - 'list': ( + "list": ( ListOS, ARGS_NONE, [NOHDR_OPT, PRIORITY_OPT], "", "Lists all valid operating systems on the cluster"), - 'diagnose': ( + "diagnose": ( DiagnoseOS, ARGS_NONE, [PRIORITY_OPT], "", "Diagnose all operating systems"), - 'info': ( + "info": ( ShowOSInfo, [ArgOs()], [PRIORITY_OPT], "", "Show detailed information about " "operating systems"), - 'modify': ( + "modify": ( ModifyOS, ARGS_ONE_OS, [HVLIST_OPT, OSPARAMS_OPT, DRY_RUN_OPT, PRIORITY_OPT, HID_OS_OPT, BLK_OS_OPT], diff --git a/lib/cmdlib.py b/lib/cmdlib.py index 25e3ad5c0e4c6c7b27430867b0b79cd1a8817337..29dfcc91d65576b2b6797e2a6a3be8ab993bd4cb 100644 --- a/lib/cmdlib.py +++ b/lib/cmdlib.py @@ -998,20 +998,20 @@ def _BuildInstanceHookEnvByObject(lu, instance, override=None): bep = cluster.FillBE(instance) hvp = cluster.FillHV(instance) args = { - 'name': instance.name, - 'primary_node': instance.primary_node, - 'secondary_nodes': instance.secondary_nodes, - 'os_type': instance.os, - 'status': instance.admin_up, - 'memory': bep[constants.BE_MEMORY], - 'vcpus': bep[constants.BE_VCPUS], - 'nics': _NICListToTuple(lu, instance.nics), - 'disk_template': instance.disk_template, - 'disks': [(disk.size, disk.mode) for disk in instance.disks], - 'bep': bep, - 'hvp': hvp, - 'hypervisor_name': instance.hypervisor, - 'tags': instance.tags, + "name": instance.name, + "primary_node": instance.primary_node, + "secondary_nodes": instance.secondary_nodes, + "os_type": instance.os, + "status": instance.admin_up, + "memory": bep[constants.BE_MEMORY], + "vcpus": bep[constants.BE_VCPUS], + "nics": 
_NICListToTuple(lu, instance.nics), + "disk_template": instance.disk_template, + "disks": [(disk.size, disk.mode) for disk in instance.disks], + "bep": bep, + "hvp": hvp, + "hypervisor_name": instance.hypervisor, + "tags": instance.tags, } if override: args.update(override) @@ -2840,7 +2840,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors): self._ErrorIf(test, self.ENODEHOOKS, node_name, "Script %s failed, output:", script) if test: - output = self._HOOKS_INDENT_RE.sub(' ', output) + output = self._HOOKS_INDENT_RE.sub(" ", output) feedback_fn("%s" % output) lu_result = 0 @@ -4279,13 +4279,13 @@ class LUNodeQueryvols(NoHooksLU): if field == "node": val = node elif field == "phys": - val = vol['dev'] + val = vol["dev"] elif field == "vg": - val = vol['vg'] + val = vol["vg"] elif field == "name": - val = vol['name'] + val = vol["name"] elif field == "size": - val = int(float(vol['size'])) + val = int(float(vol["size"])) elif field == "instance": val = vol2inst.get((node, vol["vg"] + "/" + vol["name"]), "-") else: @@ -5526,7 +5526,7 @@ def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name): nodeinfo = lu.rpc.call_node_info([node], None, hypervisor_name) nodeinfo[node].Raise("Can't get data from node %s" % node, prereq=True, ecode=errors.ECODE_ENVIRON) - free_mem = nodeinfo[node].payload.get('memory_free', None) + free_mem = nodeinfo[node].payload.get("memory_free", None) if not isinstance(free_mem, int): raise errors.OpPrereqError("Can't compute free memory on node %s, result" " was '%s'" % (node, free_mem), @@ -8432,7 +8432,7 @@ class LUInstanceCreate(LogicalUnit): disk_images = [] for idx in range(export_disks): - option = 'disk%d_dump' % idx + option = "disk%d_dump" % idx if export_info.has_option(constants.INISECT_INS, option): # FIXME: are the old os-es, disk sizes, etc. useful? 
export_name = export_info.get(constants.INISECT_INS, option) @@ -8443,9 +8443,9 @@ class LUInstanceCreate(LogicalUnit): self.src_images = disk_images - old_name = export_info.get(constants.INISECT_INS, 'name') + old_name = export_info.get(constants.INISECT_INS, "name") try: - exp_nic_count = export_info.getint(constants.INISECT_INS, 'nic_count') + exp_nic_count = export_info.getint(constants.INISECT_INS, "nic_count") except (TypeError, ValueError), err: raise errors.OpPrereqError("Invalid export file, nic_count is not" " an integer: %s" % str(err), @@ -8453,7 +8453,7 @@ class LUInstanceCreate(LogicalUnit): if self.op.instance_name == old_name: for idx, nic in enumerate(self.nics): if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx: - nic_mac_ini = 'nic%d_mac' % idx + nic_mac_ini = "nic%d_mac" % idx nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini) # ENDIF: self.op.mode == constants.INSTANCE_IMPORT @@ -10374,13 +10374,13 @@ class LUInstanceSetParams(LogicalUnit): raise errors.OpPrereqError("Invalid IP address '%s'" % nic_ip, errors.ECODE_INVAL) - nic_bridge = nic_dict.get('bridge', None) + nic_bridge = nic_dict.get("bridge", None) nic_link = nic_dict.get(constants.INIC_LINK, None) if nic_bridge and nic_link: raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'" " at the same time", errors.ECODE_INVAL) elif nic_bridge and nic_bridge.lower() == constants.VALUE_NONE: - nic_dict['bridge'] = None + nic_dict["bridge"] = None elif nic_link and nic_link.lower() == constants.VALUE_NONE: nic_dict[constants.INIC_LINK] = None @@ -10423,13 +10423,13 @@ class LUInstanceSetParams(LogicalUnit): """ args = dict() if constants.BE_MEMORY in self.be_new: - args['memory'] = self.be_new[constants.BE_MEMORY] + args["memory"] = self.be_new[constants.BE_MEMORY] if constants.BE_VCPUS in self.be_new: - args['vcpus'] = self.be_new[constants.BE_VCPUS] + args["vcpus"] = self.be_new[constants.BE_VCPUS] # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk # information at all. 
if self.op.nics: - args['nics'] = [] + args["nics"] = [] nic_override = dict(self.op.nics) for idx, nic in enumerate(self.instance.nics): if idx in nic_override: @@ -10450,16 +10450,16 @@ class LUInstanceSetParams(LogicalUnit): nicparams = self.cluster.SimpleFillNIC(nic.nicparams) mode = nicparams[constants.NIC_MODE] link = nicparams[constants.NIC_LINK] - args['nics'].append((ip, mac, mode, link)) + args["nics"].append((ip, mac, mode, link)) if constants.DDM_ADD in nic_override: ip = nic_override[constants.DDM_ADD].get(constants.INIC_IP, None) mac = nic_override[constants.DDM_ADD][constants.INIC_MAC] nicparams = self.nic_pnew[constants.DDM_ADD] mode = nicparams[constants.NIC_MODE] link = nicparams[constants.NIC_LINK] - args['nics'].append((ip, mac, mode, link)) + args["nics"].append((ip, mac, mode, link)) elif constants.DDM_REMOVE in nic_override: - del args['nics'][-1] + del args["nics"][-1] env = _BuildInstanceHookEnvByObject(self, self.instance, override=args) if self.op.disk_template: @@ -10577,7 +10577,7 @@ class LUInstanceSetParams(LogicalUnit): # Assume the primary node is unreachable and go ahead self.warn.append("Can't get info from primary node %s: %s" % (pnode, msg)) - elif not isinstance(pninfo.payload.get('memory_free', None), int): + elif not isinstance(pninfo.payload.get("memory_free", None), int): self.warn.append("Node data from primary node %s doesn't contain" " free memory information" % pnode) elif instance_info.fail_msg: @@ -10585,14 +10585,14 @@ class LUInstanceSetParams(LogicalUnit): instance_info.fail_msg) else: if instance_info.payload: - current_mem = int(instance_info.payload['memory']) + current_mem = int(instance_info.payload["memory"]) else: # Assume instance not running # (there is a slight race condition here, but it's not very probable, # and we have no other way to check) current_mem = 0 miss_mem = (be_new[constants.BE_MEMORY] - current_mem - - pninfo.payload['memory_free']) + pninfo.payload["memory_free"]) if miss_mem > 0: raise errors.OpPrereqError("This change will prevent the instance" " from starting, due to %d MB of memory" @@ -10605,11 +10605,11 @@ class LUInstanceSetParams(LogicalUnit): continue nres.Raise("Can't get info from secondary node %s" % node, prereq=True, ecode=errors.ECODE_STATE) - if not isinstance(nres.payload.get('memory_free', None), int): + if not isinstance(nres.payload.get("memory_free", None), int): raise errors.OpPrereqError("Secondary node %s didn't return free" " memory information" % node, errors.ECODE_STATE) - elif be_new[constants.BE_MEMORY] > nres.payload['memory_free']: + elif be_new[constants.BE_MEMORY] > nres.payload["memory_free"]: raise errors.OpPrereqError("This change will prevent the instance" " from failover to its secondary node" " %s, due to not enough memory" % node, @@ -10645,8 +10645,8 @@ class LUInstanceSetParams(LogicalUnit): for key in constants.NICS_PARAMETERS if key in nic_dict]) - if 'bridge' in nic_dict: - update_params_dict[constants.NIC_LINK] = nic_dict['bridge'] + if "bridge" in nic_dict: + update_params_dict[constants.NIC_LINK] = nic_dict["bridge"] new_nic_params = _GetUpdatedParams(old_nic_params, update_params_dict) @@ -10672,12 +10672,12 @@ class LUInstanceSetParams(LogicalUnit): else: nic_ip = old_nic_ip if nic_ip is None: - raise errors.OpPrereqError('Cannot set the nic ip to None' - ' on a routed nic', errors.ECODE_INVAL) + raise errors.OpPrereqError("Cannot set the nic ip to None" + " on a routed nic", errors.ECODE_INVAL) if constants.INIC_MAC in nic_dict: nic_mac = 
nic_dict[constants.INIC_MAC] if nic_mac is None: - raise errors.OpPrereqError('Cannot set the nic mac to None', + raise errors.OpPrereqError("Cannot set the nic mac to None", errors.ECODE_INVAL) elif nic_mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE): # otherwise generate the mac @@ -12565,8 +12565,8 @@ class IAllocator(object): nname) remote_info = nresult.payload - for attr in ['memory_total', 'memory_free', 'memory_dom0', - 'vg_size', 'vg_free', 'cpu_total']: + for attr in ["memory_total", "memory_free", "memory_dom0", + "vg_size", "vg_free", "cpu_total"]: if attr not in remote_info: raise errors.OpExecError("Node '%s' didn't return attribute" " '%s'" % (nname, attr)) @@ -12582,21 +12582,21 @@ class IAllocator(object): if iinfo.name not in node_iinfo[nname].payload: i_used_mem = 0 else: - i_used_mem = int(node_iinfo[nname].payload[iinfo.name]['memory']) + i_used_mem = int(node_iinfo[nname].payload[iinfo.name]["memory"]) i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem - remote_info['memory_free'] -= max(0, i_mem_diff) + remote_info["memory_free"] -= max(0, i_mem_diff) if iinfo.admin_up: i_p_up_mem += beinfo[constants.BE_MEMORY] # compute memory used by instances pnr_dyn = { - "total_memory": remote_info['memory_total'], - "reserved_memory": remote_info['memory_dom0'], - "free_memory": remote_info['memory_free'], - "total_disk": remote_info['vg_size'], - "free_disk": remote_info['vg_free'], - "total_cpus": remote_info['cpu_total'], + "total_memory": remote_info["memory_total"], + "reserved_memory": remote_info["memory_dom0"], + "free_memory": remote_info["memory_free"], + "total_disk": remote_info["vg_size"], + "free_disk": remote_info["vg_free"], + "total_cpus": remote_info["cpu_total"], "i_pri_memory": i_p_mem, "i_pri_up_memory": i_p_up_mem, } diff --git a/lib/constants.py b/lib/constants.py index 7cdd60a6af14e368605b3c6b406d35d8e85dfaa5..466ba20799ef0be705d61965b2242f3a9932006c 100644 --- a/lib/constants.py +++ b/lib/constants.py @@ -210,7 +210,7 @@ PROC_MOUNTS = "/proc/mounts" LUXI_EOM = "\3" LUXI_VERSION = CONFIG_VERSION -# one of 'no', 'yes', 'only' +# one of "no", "yes", "only" SYSLOG_USAGE = _autoconf.SYSLOG_USAGE SYSLOG_NO = "no" SYSLOG_YES = "yes" @@ -438,8 +438,8 @@ EXPORT_MODES = frozenset([ ]) # Lock recalculate mode -LOCKS_REPLACE = 'replace' -LOCKS_APPEND = 'append' +LOCKS_REPLACE = "replace" +LOCKS_APPEND = "append" # Lock timeout (sum) before we should go into blocking acquire (still # can be reset by priority change); computed as max time (10 hours) @@ -559,20 +559,20 @@ RUNPARTS_STATUS = frozenset([RUNPARTS_SKIP, RUNPARTS_RUN, RUNPARTS_ERR]) RPC_ENCODING_ZLIB_BASE64) = range(2) # os related constants -OS_SCRIPT_CREATE = 'create' -OS_SCRIPT_IMPORT = 'import' -OS_SCRIPT_EXPORT = 'export' -OS_SCRIPT_RENAME = 'rename' -OS_SCRIPT_VERIFY = 'verify' +OS_SCRIPT_CREATE = "create" +OS_SCRIPT_IMPORT = "import" +OS_SCRIPT_EXPORT = "export" +OS_SCRIPT_RENAME = "rename" +OS_SCRIPT_VERIFY = "verify" OS_SCRIPTS = frozenset([OS_SCRIPT_CREATE, OS_SCRIPT_IMPORT, OS_SCRIPT_EXPORT, OS_SCRIPT_RENAME, OS_SCRIPT_VERIFY]) -OS_API_FILE = 'ganeti_api_version' -OS_VARIANTS_FILE = 'variants.list' -OS_PARAMETERS_FILE = 'parameters.list' +OS_API_FILE = "ganeti_api_version" +OS_VARIANTS_FILE = "variants.list" +OS_PARAMETERS_FILE = "parameters.list" -OS_VALIDATE_PARAMETERS = 'parameters' +OS_VALIDATE_PARAMETERS = "parameters" OS_VALIDATE_CALLS = frozenset([OS_VALIDATE_PARAMETERS]) # ssh constants @@ -593,11 +593,11 @@ REBOOT_TYPES = frozenset([INSTANCE_REBOOT_SOFT, INSTANCE_REBOOT_HARD, 
INSTANCE_REBOOT_FULL]) -VTYPE_STRING = 'string' +VTYPE_STRING = "string" VTYPE_MAYBE_STRING = "maybe-string" -VTYPE_BOOL = 'bool' -VTYPE_SIZE = 'size' # size, in MiBs -VTYPE_INT = 'int' +VTYPE_BOOL = "bool" +VTYPE_SIZE = "size" # size, in MiBs +VTYPE_INT = "int" ENFORCEABLE_TYPES = frozenset([ VTYPE_STRING, VTYPE_MAYBE_STRING, @@ -885,7 +885,7 @@ HT_MIGRATION_NONLIVE = "non-live" HT_MIGRATION_MODES = frozenset([HT_MIGRATION_LIVE, HT_MIGRATION_NONLIVE]) # Cluster Verify steps -VERIFY_NPLUSONE_MEM = 'nplusone_mem' +VERIFY_NPLUSONE_MEM = "nplusone_mem" VERIFY_OPTIONAL_CHECKS = frozenset([VERIFY_NPLUSONE_MEM]) # Node verify constants @@ -1169,18 +1169,18 @@ HVC_DEFAULTS = { HT_XEN_PVM: { HV_USE_BOOTLOADER: False, HV_BOOTLOADER_PATH: XEN_BOOTLOADER, - HV_BOOTLOADER_ARGS: '', + HV_BOOTLOADER_ARGS: "", HV_KERNEL_PATH: "/boot/vmlinuz-2.6-xenU", - HV_INITRD_PATH: '', - HV_ROOT_PATH: '/dev/sda1', - HV_KERNEL_ARGS: 'ro', + HV_INITRD_PATH: "", + HV_ROOT_PATH: "/dev/sda1", + HV_KERNEL_ARGS: "ro", HV_MIGRATION_PORT: 8002, HV_MIGRATION_MODE: HT_MIGRATION_LIVE, HV_BLOCKDEV_PREFIX: "sd", }, HT_XEN_HVM: { HV_BOOT_ORDER: "cd", - HV_CDROM_IMAGE_PATH: '', + HV_CDROM_IMAGE_PATH: "", HV_NIC_TYPE: HT_NIC_RTL8139, HV_DISK_TYPE: HT_DISK_PARAVIRTUAL, HV_VNC_BIND_ADDRESS: IP4_ADDRESS_ANY, @@ -1196,24 +1196,24 @@ HVC_DEFAULTS = { }, HT_KVM: { HV_KERNEL_PATH: "/boot/vmlinuz-2.6-kvmU", - HV_INITRD_PATH: '', - HV_KERNEL_ARGS: 'ro', - HV_ROOT_PATH: '/dev/vda1', + HV_INITRD_PATH: "", + HV_KERNEL_ARGS: "ro", + HV_ROOT_PATH: "/dev/vda1", HV_ACPI: True, HV_SERIAL_CONSOLE: True, - HV_VNC_BIND_ADDRESS: '', + HV_VNC_BIND_ADDRESS: "", HV_VNC_TLS: False, - HV_VNC_X509: '', + HV_VNC_X509: "", HV_VNC_X509_VERIFY: False, - HV_VNC_PASSWORD_FILE: '', - HV_KVM_FLOPPY_IMAGE_PATH: '', - HV_CDROM_IMAGE_PATH: '', - HV_KVM_CDROM2_IMAGE_PATH: '', + HV_VNC_PASSWORD_FILE: "", + HV_KVM_FLOPPY_IMAGE_PATH: "", + HV_CDROM_IMAGE_PATH: "", + HV_KVM_CDROM2_IMAGE_PATH: "", HV_BOOT_ORDER: HT_BO_DISK, HV_NIC_TYPE: HT_NIC_PARAVIRTUAL, HV_DISK_TYPE: HT_DISK_PARAVIRTUAL, - HV_KVM_CDROM_DISK_TYPE: '', - HV_USB_MOUSE: '', + HV_KVM_CDROM_DISK_TYPE: "", + HV_USB_MOUSE: "", HV_KEYMAP: "", HV_MIGRATION_PORT: 8102, HV_MIGRATION_BANDWIDTH: 32, # MiB/s @@ -1222,7 +1222,7 @@ HVC_DEFAULTS = { HV_USE_LOCALTIME: False, HV_DISK_CACHE: HT_CACHE_DEFAULT, HV_SECURITY_MODEL: HT_SM_NONE, - HV_SECURITY_DOMAIN: '', + HV_SECURITY_DOMAIN: "", HV_KVM_FLAG: "", HV_VHOST_NET: False, HV_KVM_USE_CHROOT: False, @@ -1333,7 +1333,7 @@ CONFD_CONFIG_RELOAD_RATELIMIT = 2 # This allows us to distinguish different types of confd protocols and handle # them. For example by changing this we can move the whole payload to be # compressed, or move away from json. -CONFD_MAGIC_FOURCC = 'plj0' +CONFD_MAGIC_FOURCC = "plj0" # By default a confd request is sent to the minimum between this number and all # MCs. 
6 was chosen because even in the case of a disastrous 50% response rate, diff --git a/lib/http/auth.py b/lib/http/auth.py index 6bfd9e029fd52915b9de7dd1816adcaa4e28ce29..09b0ce72154ddb341adb7d834a14fcb8bdb84ad1 100644 --- a/lib/http/auth.py +++ b/lib/http/auth.py @@ -198,7 +198,7 @@ class HttpServerRequestAuthentication(object): """ try: - creds = base64.b64decode(in_data.encode('ascii')).decode('ascii') + creds = base64.b64decode(in_data.encode("ascii")).decode("ascii") except (TypeError, binascii.Error, UnicodeError): logging.exception("Error when decoding Basic authentication credentials") return False diff --git a/lib/http/server.py b/lib/http/server.py index 7a46af650f14085c359b437acae7dd793ed1810b..52b1340648290c7229d87b5d54583356a5271faa 100644 --- a/lib/http/server.py +++ b/lib/http/server.py @@ -36,10 +36,10 @@ from ganeti import utils from ganeti import netutils -WEEKDAYNAME = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] +WEEKDAYNAME = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] MONTHNAME = [None, - 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', - 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] + "Jan", "Feb", "Mar", "Apr", "May", "Jun", + "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"] # Default error message DEFAULT_ERROR_CONTENT_TYPE = "text/html" @@ -178,7 +178,7 @@ class _HttpClientToServerMessageReader(http.HttpMessageReader): if len(words) == 3: [method, path, version] = words - if version[:5] != 'HTTP/': + if version[:5] != "HTTP/": raise http.HttpBadRequest("Bad request version (%r)" % version) try: diff --git a/lib/hypervisor/hv_base.py b/lib/hypervisor/hv_base.py index 17b74e5bd9be3eaf5f3df60b18455bf345e90bc2..698eed0e063b2e4e53569a691c5d37e2626c459a 100644 --- a/lib/hypervisor/hv_base.py +++ b/lib/hypervisor/hv_base.py @@ -242,7 +242,7 @@ class BaseHypervisor(object): @return: instance migration information - serialized form """ - return '' + return "" def AcceptInstance(self, instance, info, target): """Prepare to accept an instance. 
@@ -381,16 +381,16 @@ class BaseHypervisor(object): if len(splitfields) > 1: key = splitfields[0].strip() val = splitfields[1].strip() - if key == 'MemTotal': - result['memory_total'] = int(val.split()[0])/1024 - elif key in ('MemFree', 'Buffers', 'Cached'): + if key == "MemTotal": + result["memory_total"] = int(val.split()[0])/1024 + elif key in ("MemFree", "Buffers", "Cached"): sum_free += int(val.split()[0])/1024 - elif key == 'Active': - result['memory_dom0'] = int(val.split()[0])/1024 + elif key == "Active": + result["memory_dom0"] = int(val.split()[0])/1024 except (ValueError, TypeError), err: raise errors.HypervisorError("Failed to compute memory usage: %s" % (err,)) - result['memory_free'] = sum_free + result["memory_free"] = sum_free cpu_total = 0 try: @@ -402,10 +402,10 @@ class BaseHypervisor(object): fh.close() except EnvironmentError, err: raise errors.HypervisorError("Failed to list node info: %s" % (err,)) - result['cpu_total'] = cpu_total + result["cpu_total"] = cpu_total # FIXME: export correct data here - result['cpu_nodes'] = 1 - result['cpu_sockets'] = 1 + result["cpu_nodes"] = 1 + result["cpu_sockets"] = 1 return result diff --git a/lib/hypervisor/hv_fake.py b/lib/hypervisor/hv_fake.py index ff6b10a0349b9a253a16157af83667210e0cf3e5..441e44be07a92f4faa14e44bcb019e02401d6801 100644 --- a/lib/hypervisor/hv_fake.py +++ b/lib/hypervisor/hv_fake.py @@ -200,7 +200,7 @@ class FakeHypervisor(hv_base.BaseHypervisor): result = self.GetLinuxNodeInfo() - # substract running instances + # subtract running instances all_instances = self.GetAllInstancesInfo() - result['memory_free'] -= min(result['memory_free'], + result["memory_free"] -= min(result["memory_free"], sum([row[2] for row in all_instances])) return result diff --git a/lib/hypervisor/hv_kvm.py b/lib/hypervisor/hv_kvm.py index 0f6569d1dbe26271947d63e07007eae538afb21f..b11bf4cf59dca54e9b18129721d1680a25fb4577 100644 --- a/lib/hypervisor/hv_kvm.py +++ b/lib/hypervisor/hv_kvm.py @@ -196,7 +196,7 @@ class KVMHypervisor(hv_base.BaseHypervisor): constants.HV_MEM_PATH: hv_base.OPT_DIR_CHECK, } - _MIGRATION_STATUS_RE = re.compile('Migration\s+status:\s+(\w+)', + _MIGRATION_STATUS_RE = re.compile(r"Migration\s+status:\s+(\w+)", re.M | re.I) _MIGRATION_INFO_MAX_BAD_ANSWERS = 5 _MIGRATION_INFO_RETRY_DELAY = 2 @@ -257,7 +257,7 @@ class KVMHypervisor(hv_base.BaseHypervisor): memory = 0 vcpus = 0 - arg_list = cmdline.split('\x00') + arg_list = cmdline.split("\x00") while arg_list: arg = arg_list.pop(0) if arg == "-name": @@ -526,15 +526,15 @@ class KVMHypervisor(hv_base.BaseHypervisor): kvm = constants.KVM_PATH kvm_cmd = [kvm] # used just by the vnc server, if enabled - kvm_cmd.extend(['-name', instance.name]) - kvm_cmd.extend(['-m', instance.beparams[constants.BE_MEMORY]]) - kvm_cmd.extend(['-smp', instance.beparams[constants.BE_VCPUS]]) - kvm_cmd.extend(['-pidfile', pidfile]) - kvm_cmd.extend(['-daemonize']) + kvm_cmd.extend(["-name", instance.name]) + kvm_cmd.extend(["-m", instance.beparams[constants.BE_MEMORY]]) + kvm_cmd.extend(["-smp", instance.beparams[constants.BE_VCPUS]]) + kvm_cmd.extend(["-pidfile", pidfile]) + kvm_cmd.extend(["-daemonize"]) if not instance.hvparams[constants.HV_ACPI]: - kvm_cmd.extend(['-no-acpi']) + kvm_cmd.extend(["-no-acpi"]) if startup_paused: - kvm_cmd.extend(['-S']) + kvm_cmd.extend(["-S"]) hvp = instance.hvparams boot_disk = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_DISK @@ -548,13 +548,13 @@ class KVMHypervisor(hv_base.BaseHypervisor): kvm_cmd.extend(["-disable-kvm"]) if boot_network: - kvm_cmd.extend(['-boot', 'n']) +
kvm_cmd.extend(["-boot", "n"]) disk_type = hvp[constants.HV_DISK_TYPE] if disk_type == constants.HT_DISK_PARAVIRTUAL: - if_val = ',if=virtio' + if_val = ",if=virtio" else: - if_val = ',if=%s' % disk_type + if_val = ",if=%s" % disk_type # Cache mode disk_cache = hvp[constants.HV_DISK_CACHE] if instance.disk_template in constants.DTS_EXT_MIRROR: @@ -575,14 +575,14 @@ class KVMHypervisor(hv_base.BaseHypervisor): # TODO: handle FD_LOOP and FD_BLKTAP (?) boot_val = "" if boot_disk: - kvm_cmd.extend(['-boot', 'c']) + kvm_cmd.extend(["-boot", "c"]) boot_disk = False if (v_major, v_min) < (0, 14) and disk_type != constants.HT_DISK_IDE: boot_val = ",boot=on" - drive_val = 'file=%s,format=raw%s%s%s' % (dev_path, if_val, boot_val, + drive_val = "file=%s,format=raw%s%s%s" % (dev_path, if_val, boot_val, cache_val) - kvm_cmd.extend(['-drive', drive_val]) + kvm_cmd.extend(["-drive", drive_val]) #Now we can specify a different device type for CDROM devices. cdrom_disk_type = hvp[constants.HV_KVM_CDROM_DISK_TYPE] @@ -591,55 +591,55 @@ class KVMHypervisor(hv_base.BaseHypervisor): iso_image = hvp[constants.HV_CDROM_IMAGE_PATH] if iso_image: - options = ',format=raw,media=cdrom' + options = ",format=raw,media=cdrom" if boot_cdrom: - kvm_cmd.extend(['-boot', 'd']) + kvm_cmd.extend(["-boot", "d"]) if cdrom_disk_type != constants.HT_DISK_IDE: - options = '%s,boot=on,if=%s' % (options, constants.HT_DISK_IDE) + options = "%s,boot=on,if=%s" % (options, constants.HT_DISK_IDE) else: - options = '%s,boot=on' % options + options = "%s,boot=on" % options else: if cdrom_disk_type == constants.HT_DISK_PARAVIRTUAL: - if_val = ',if=virtio' + if_val = ",if=virtio" else: - if_val = ',if=%s' % cdrom_disk_type - options = '%s%s' % (options, if_val) - drive_val = 'file=%s%s' % (iso_image, options) - kvm_cmd.extend(['-drive', drive_val]) + if_val = ",if=%s" % cdrom_disk_type + options = "%s%s" % (options, if_val) + drive_val = "file=%s%s" % (iso_image, options) + kvm_cmd.extend(["-drive", drive_val]) iso_image2 = hvp[constants.HV_KVM_CDROM2_IMAGE_PATH] if iso_image2: - options = ',format=raw,media=cdrom' + options = ",format=raw,media=cdrom" if cdrom_disk_type == constants.HT_DISK_PARAVIRTUAL: - if_val = ',if=virtio' + if_val = ",if=virtio" else: - if_val = ',if=%s' % cdrom_disk_type - options = '%s%s' % (options, if_val) - drive_val = 'file=%s%s' % (iso_image2, options) - kvm_cmd.extend(['-drive', drive_val]) + if_val = ",if=%s" % cdrom_disk_type + options = "%s%s" % (options, if_val) + drive_val = "file=%s%s" % (iso_image2, options) + kvm_cmd.extend(["-drive", drive_val]) floppy_image = hvp[constants.HV_KVM_FLOPPY_IMAGE_PATH] if floppy_image: - options = ',format=raw,media=disk' + options = ",format=raw,media=disk" if boot_floppy: - kvm_cmd.extend(['-boot', 'a']) - options = '%s,boot=on' % options - if_val = ',if=floppy' - options = '%s%s' % (options, if_val) - drive_val = 'file=%s%s' % (floppy_image, options) - kvm_cmd.extend(['-drive', drive_val]) + kvm_cmd.extend(["-boot", "a"]) + options = "%s,boot=on" % options + if_val = ",if=floppy" + options = "%s%s" % (options, if_val) + drive_val = "file=%s%s" % (floppy_image, options) + kvm_cmd.extend(["-drive", drive_val]) kernel_path = hvp[constants.HV_KERNEL_PATH] if kernel_path: - kvm_cmd.extend(['-kernel', kernel_path]) + kvm_cmd.extend(["-kernel", kernel_path]) initrd_path = hvp[constants.HV_INITRD_PATH] if initrd_path: - kvm_cmd.extend(['-initrd', initrd_path]) - root_append = ['root=%s' % hvp[constants.HV_ROOT_PATH], + kvm_cmd.extend(["-initrd", initrd_path]) + root_append = 
["root=%s" % hvp[constants.HV_ROOT_PATH], hvp[constants.HV_KERNEL_ARGS]] if hvp[constants.HV_SERIAL_CONSOLE]: - root_append.append('console=ttyS0,38400') - kvm_cmd.extend(['-append', ' '.join(root_append)]) + root_append.append("console=ttyS0,38400") + kvm_cmd.extend(["-append", " ".join(root_append)]) mem_path = hvp[constants.HV_MEM_PATH] if mem_path: @@ -649,10 +649,10 @@ class KVMHypervisor(hv_base.BaseHypervisor): vnc_bind_address = hvp[constants.HV_VNC_BIND_ADDRESS] if mouse_type: - kvm_cmd.extend(['-usb']) - kvm_cmd.extend(['-usbdevice', mouse_type]) + kvm_cmd.extend(["-usb"]) + kvm_cmd.extend(["-usbdevice", mouse_type]) elif vnc_bind_address: - kvm_cmd.extend(['-usbdevice', constants.HT_MOUSE_TABLET]) + kvm_cmd.extend(["-usbdevice", constants.HT_MOUSE_TABLET]) keymap = hvp[constants.HV_KEYMAP] if keymap: @@ -669,53 +669,53 @@ class KVMHypervisor(hv_base.BaseHypervisor): if instance.network_port > constants.VNC_BASE_PORT: display = instance.network_port - constants.VNC_BASE_PORT if vnc_bind_address == constants.IP4_ADDRESS_ANY: - vnc_arg = ':%d' % (display) + vnc_arg = ":%d" % (display) else: - vnc_arg = '%s:%d' % (vnc_bind_address, display) + vnc_arg = "%s:%d" % (vnc_bind_address, display) else: logging.error("Network port is not a valid VNC display (%d < %d)." " Not starting VNC", instance.network_port, constants.VNC_BASE_PORT) - vnc_arg = 'none' + vnc_arg = "none" # Only allow tls and other option when not binding to a file, for now. # kvm/qemu gets confused otherwise about the filename to use. - vnc_append = '' + vnc_append = "" if hvp[constants.HV_VNC_TLS]: - vnc_append = '%s,tls' % vnc_append + vnc_append = "%s,tls" % vnc_append if hvp[constants.HV_VNC_X509_VERIFY]: - vnc_append = '%s,x509verify=%s' % (vnc_append, + vnc_append = "%s,x509verify=%s" % (vnc_append, hvp[constants.HV_VNC_X509]) elif hvp[constants.HV_VNC_X509]: - vnc_append = '%s,x509=%s' % (vnc_append, + vnc_append = "%s,x509=%s" % (vnc_append, hvp[constants.HV_VNC_X509]) if hvp[constants.HV_VNC_PASSWORD_FILE]: - vnc_append = '%s,password' % vnc_append + vnc_append = "%s,password" % vnc_append - vnc_arg = '%s%s' % (vnc_arg, vnc_append) + vnc_arg = "%s%s" % (vnc_arg, vnc_append) else: - vnc_arg = 'unix:%s/%s.vnc' % (vnc_bind_address, instance.name) + vnc_arg = "unix:%s/%s.vnc" % (vnc_bind_address, instance.name) - kvm_cmd.extend(['-vnc', vnc_arg]) + kvm_cmd.extend(["-vnc", vnc_arg]) else: - kvm_cmd.extend(['-nographic']) + kvm_cmd.extend(["-nographic"]) monitor_dev = ("unix:%s,server,nowait" % self._InstanceMonitor(instance.name)) - kvm_cmd.extend(['-monitor', monitor_dev]) + kvm_cmd.extend(["-monitor", monitor_dev]) if hvp[constants.HV_SERIAL_CONSOLE]: - serial_dev = ('unix:%s,server,nowait' % + serial_dev = ("unix:%s,server,nowait" % self._InstanceSerial(instance.name)) - kvm_cmd.extend(['-serial', serial_dev]) + kvm_cmd.extend(["-serial", serial_dev]) else: - kvm_cmd.extend(['-serial', 'none']) + kvm_cmd.extend(["-serial", "none"]) if hvp[constants.HV_USE_LOCALTIME]: - kvm_cmd.extend(['-localtime']) + kvm_cmd.extend(["-localtime"]) if hvp[constants.HV_KVM_USE_CHROOT]: - kvm_cmd.extend(['-chroot', self._InstanceChrootDir(instance.name)]) + kvm_cmd.extend(["-chroot", self._InstanceChrootDir(instance.name)]) # Save the current instance nics, but defer their expansion as parameters, # as we'll need to generate executable temp files for them. 
@@ -868,7 +868,7 @@ class KVMHypervisor(hv_base.BaseHypervisor): if incoming: target, port = incoming - kvm_cmd.extend(['-incoming', 'tcp:%s:%s' % (target, port)]) + kvm_cmd.extend(["-incoming", "tcp:%s:%s" % (target, port)]) # Changing the vnc password doesn't bother the guest that much. At most it # will surprise people who connect to it. Whether positively or negatively @@ -919,7 +919,7 @@ class KVMHypervisor(hv_base.BaseHypervisor): data=tap) if vnc_pwd: - change_cmd = 'change vnc password %s' % vnc_pwd + change_cmd = "change vnc password %s" % vnc_pwd self._CallMonitorCommand(instance.name, change_cmd) for filename in temp_files: @@ -986,7 +986,7 @@ class KVMHypervisor(hv_base.BaseHypervisor): if force or not acpi: utils.KillProcess(pid) else: - self._CallMonitorCommand(name, 'system_powerdown') + self._CallMonitorCommand(name, "system_powerdown") def CleanupInstance(self, instance_name): """Cleanup after a stopped instance @@ -1097,20 +1097,20 @@ class KVMHypervisor(hv_base.BaseHypervisor): raise errors.HypervisorError("Instance not running, cannot migrate") if not live: - self._CallMonitorCommand(instance_name, 'stop') + self._CallMonitorCommand(instance_name, "stop") - migrate_command = ('migrate_set_speed %dm' % + migrate_command = ("migrate_set_speed %dm" % instance.hvparams[constants.HV_MIGRATION_BANDWIDTH]) self._CallMonitorCommand(instance_name, migrate_command) - migrate_command = ('migrate_set_downtime %dms' % + migrate_command = ("migrate_set_downtime %dms" % instance.hvparams[constants.HV_MIGRATION_DOWNTIME]) self._CallMonitorCommand(instance_name, migrate_command) - migrate_command = 'migrate -d tcp:%s:%s' % (target, port) + migrate_command = "migrate -d tcp:%s:%s" % (target, port) self._CallMonitorCommand(instance_name, migrate_command) - info_command = 'info migrate' + info_command = "info migrate" done = False broken_answers = 0 while not done: @@ -1126,13 +1126,13 @@ class KVMHypervisor(hv_base.BaseHypervisor): time.sleep(self._MIGRATION_INFO_RETRY_DELAY) else: status = match.group(1) - if status == 'completed': + if status == "completed": done = True - elif status == 'active': + elif status == "active": # reset the broken answers count broken_answers = 0 time.sleep(self._MIGRATION_INFO_RETRY_DELAY) - elif status == 'failed' or status == 'cancelled': + elif status == "failed" or status == "cancelled": if not live: - self._CallMonitorCommand(instance_name, 'cont') + self._CallMonitorCommand(instance_name, "cont") raise errors.HypervisorError("Migration %s at the kvm level" % diff --git a/lib/hypervisor/hv_xen.py b/lib/hypervisor/hv_xen.py index ff067c018c5ed031871559670162a38ad6e157e0..7693bead17b2f89227823d2bf38443e86eb806aa 100644 --- a/lib/hypervisor/hv_xen.py +++ b/lib/hypervisor/hv_xen.py @@ -46,8 +46,8 @@ class XenHypervisor(hv_base.BaseHypervisor): REBOOT_RETRY_INTERVAL = 10 ANCILLARY_FILES = [ - '/etc/xen/xend-config.sxp', - '/etc/xen/scripts/vif-bridge', + "/etc/xen/xend-config.sxp", + "/etc/xen/scripts/vif-bridge", ] @classmethod @@ -142,7 +142,7 @@ class XenHypervisor(hv_base.BaseHypervisor): " line: %s, error: %s" % (line, err)) # skip the Domain-0 (optional) - if include_node or data[0] != 'Domain-0': + if include_node or data[0] != "Domain-0": result.append(data) return result @@ -275,26 +275,26 @@ class XenHypervisor(hv_base.BaseHypervisor): if len(splitfields) > 1: key = splitfields[0].strip() val = splitfields[1].strip() - if key == 'memory' or key == 'total_memory': - result['memory_total'] = int(val) - elif key == 'free_memory': - result['memory_free'] = int(val) - elif key == 'nr_cpus': - nr_cpus =
result['cpu_total'] = int(val) - elif key == 'nr_nodes': - result['cpu_nodes'] = int(val) - elif key == 'cores_per_socket': + if key == "memory" or key == "total_memory": + result["memory_total"] = int(val) + elif key == "free_memory": + result["memory_free"] = int(val) + elif key == "nr_cpus": + nr_cpus = result["cpu_total"] = int(val) + elif key == "nr_nodes": + result["cpu_nodes"] = int(val) + elif key == "cores_per_socket": cores_per_socket = int(val) - elif key == 'threads_per_core': + elif key == "threads_per_core": threads_per_core = int(val) if (cores_per_socket is not None and threads_per_core is not None and nr_cpus is not None): - result['cpu_sockets'] = nr_cpus / (cores_per_socket * threads_per_core) + result["cpu_sockets"] = nr_cpus / (cores_per_socket * threads_per_core) dom0_info = self.GetInstanceInfo("Domain-0") if dom0_info is not None: - result['memory_dom0'] = dom0_info[2] + result["memory_dom0"] = dom0_info[2] return result @@ -344,7 +344,7 @@ class XenHypervisor(hv_base.BaseHypervisor): if len(block_devices) > 24: # 'z' - 'a' = 24 raise errors.HypervisorError("Too many disks") - namespace = [blockdev_prefix + chr(i + ord('a')) for i in range(24)] + namespace = [blockdev_prefix + chr(i + ord("a")) for i in range(24)] for sd_name, (cfdev, dev_path) in zip(namespace, block_devices): if cfdev.mode == constants.DISK_RDWR: mode = "w" diff --git a/lib/netutils.py b/lib/netutils.py index fc864eb95c9eff4092cb426df77affbf54085735..8b51e466f656e4b86377979a220a011efc08825c 100644 --- a/lib/netutils.py +++ b/lib/netutils.py @@ -476,16 +476,16 @@ class IP6Address(IPAddress): # We have a shorthand address, expand it parts = [] twoparts = address.split("::") - sep = len(twoparts[0].split(':')) + len(twoparts[1].split(':')) - parts = twoparts[0].split(':') + sep = len(twoparts[0].split(":")) + len(twoparts[1].split(":")) + parts = twoparts[0].split(":") [parts.append("0") for _ in range(8 - sep)] - parts += twoparts[1].split(':') + parts += twoparts[1].split(":") else: parts = address.split(":") address_int = 0 for part in parts: - address_int = (address_int << 16) + int(part or '0', 16) + address_int = (address_int << 16) + int(part or "0", 16) return address_int diff --git a/lib/rapi/connector.py b/lib/rapi/connector.py index 55dfe86f3add5e98030abf6028b49b307fbbeecc..34c13fd1b16224ae23bcfd3185789eb52df064d1 100644 --- a/lib/rapi/connector.py +++ b/lib/rapi/connector.py @@ -70,8 +70,8 @@ class Mapper: - args: a dictionary with additional parameters from URL """ - if '?' in uri: - (path, query) = uri.split('?', 1) + if "?" in uri: + (path, query) = uri.split("?", 1) args = cgi.parse_qs(query) else: path = uri @@ -107,7 +107,7 @@ class R_root(baserlib.R_Generic): m = cls._ROOT_PATTERN.match(handler.__name__) if m: name = m.group(1) - if name != 'root': + if name != "root": rootlist.append(name) return baserlib.BuildUriList(rootlist, "/%s") @@ -121,7 +121,7 @@ def _getResources(id_): - @return: a list of resources names. + @return: a list of resource names.
""" - r_pattern = re.compile('^R_%s_([a-zA-Z0-9]+)$' % id_) + r_pattern = re.compile("^R_%s_([a-zA-Z0-9]+)$" % id_) rlist = [] for handler in CONNECTOR.values(): @@ -167,72 +167,72 @@ def GetHandlers(node_name_pattern, instance_name_pattern, "/2": R_2, "/2/nodes": rlib2.R_2_nodes, - re.compile(r'^/2/nodes/(%s)$' % node_name_pattern): + re.compile(r"^/2/nodes/(%s)$" % node_name_pattern): rlib2.R_2_nodes_name, - re.compile(r'^/2/nodes/(%s)/tags$' % node_name_pattern): + re.compile(r"^/2/nodes/(%s)/tags$" % node_name_pattern): rlib2.R_2_nodes_name_tags, - re.compile(r'^/2/nodes/(%s)/role$' % node_name_pattern): + re.compile(r"^/2/nodes/(%s)/role$" % node_name_pattern): rlib2.R_2_nodes_name_role, - re.compile(r'^/2/nodes/(%s)/evacuate$' % node_name_pattern): + re.compile(r"^/2/nodes/(%s)/evacuate$" % node_name_pattern): rlib2.R_2_nodes_name_evacuate, - re.compile(r'^/2/nodes/(%s)/migrate$' % node_name_pattern): + re.compile(r"^/2/nodes/(%s)/migrate$" % node_name_pattern): rlib2.R_2_nodes_name_migrate, - re.compile(r'^/2/nodes/(%s)/storage$' % node_name_pattern): + re.compile(r"^/2/nodes/(%s)/storage$" % node_name_pattern): rlib2.R_2_nodes_name_storage, - re.compile(r'^/2/nodes/(%s)/storage/modify$' % node_name_pattern): + re.compile(r"^/2/nodes/(%s)/storage/modify$" % node_name_pattern): rlib2.R_2_nodes_name_storage_modify, - re.compile(r'^/2/nodes/(%s)/storage/repair$' % node_name_pattern): + re.compile(r"^/2/nodes/(%s)/storage/repair$" % node_name_pattern): rlib2.R_2_nodes_name_storage_repair, "/2/instances": rlib2.R_2_instances, - re.compile(r'^/2/instances/(%s)$' % instance_name_pattern): + re.compile(r"^/2/instances/(%s)$" % instance_name_pattern): rlib2.R_2_instances_name, - re.compile(r'^/2/instances/(%s)/info$' % instance_name_pattern): + re.compile(r"^/2/instances/(%s)/info$" % instance_name_pattern): rlib2.R_2_instances_name_info, - re.compile(r'^/2/instances/(%s)/tags$' % instance_name_pattern): + re.compile(r"^/2/instances/(%s)/tags$" % instance_name_pattern): rlib2.R_2_instances_name_tags, - re.compile(r'^/2/instances/(%s)/reboot$' % instance_name_pattern): + re.compile(r"^/2/instances/(%s)/reboot$" % instance_name_pattern): rlib2.R_2_instances_name_reboot, - re.compile(r'^/2/instances/(%s)/reinstall$' % instance_name_pattern): + re.compile(r"^/2/instances/(%s)/reinstall$" % instance_name_pattern): rlib2.R_2_instances_name_reinstall, - re.compile(r'^/2/instances/(%s)/replace-disks$' % instance_name_pattern): + re.compile(r"^/2/instances/(%s)/replace-disks$" % instance_name_pattern): rlib2.R_2_instances_name_replace_disks, - re.compile(r'^/2/instances/(%s)/shutdown$' % instance_name_pattern): + re.compile(r"^/2/instances/(%s)/shutdown$" % instance_name_pattern): rlib2.R_2_instances_name_shutdown, - re.compile(r'^/2/instances/(%s)/startup$' % instance_name_pattern): + re.compile(r"^/2/instances/(%s)/startup$" % instance_name_pattern): rlib2.R_2_instances_name_startup, - re.compile(r'^/2/instances/(%s)/activate-disks$' % instance_name_pattern): + re.compile(r"^/2/instances/(%s)/activate-disks$" % instance_name_pattern): rlib2.R_2_instances_name_activate_disks, - re.compile(r'^/2/instances/(%s)/deactivate-disks$' % instance_name_pattern): + re.compile(r"^/2/instances/(%s)/deactivate-disks$" % instance_name_pattern): rlib2.R_2_instances_name_deactivate_disks, - re.compile(r'^/2/instances/(%s)/prepare-export$' % instance_name_pattern): + re.compile(r"^/2/instances/(%s)/prepare-export$" % instance_name_pattern): rlib2.R_2_instances_name_prepare_export, - 
re.compile(r'^/2/instances/(%s)/export$' % instance_name_pattern): + re.compile(r"^/2/instances/(%s)/export$" % instance_name_pattern): rlib2.R_2_instances_name_export, - re.compile(r'^/2/instances/(%s)/migrate$' % instance_name_pattern): + re.compile(r"^/2/instances/(%s)/migrate$" % instance_name_pattern): rlib2.R_2_instances_name_migrate, - re.compile(r'^/2/instances/(%s)/failover$' % instance_name_pattern): + re.compile(r"^/2/instances/(%s)/failover$" % instance_name_pattern): rlib2.R_2_instances_name_failover, - re.compile(r'^/2/instances/(%s)/rename$' % instance_name_pattern): + re.compile(r"^/2/instances/(%s)/rename$" % instance_name_pattern): rlib2.R_2_instances_name_rename, - re.compile(r'^/2/instances/(%s)/modify$' % instance_name_pattern): + re.compile(r"^/2/instances/(%s)/modify$" % instance_name_pattern): rlib2.R_2_instances_name_modify, re.compile(r"^/2/instances/(%s)/disk/(%s)/grow$" % (instance_name_pattern, disk_pattern)): rlib2.R_2_instances_name_disk_grow, - re.compile(r'^/2/instances/(%s)/console$' % instance_name_pattern): + re.compile(r"^/2/instances/(%s)/console$" % instance_name_pattern): rlib2.R_2_instances_name_console, "/2/groups": rlib2.R_2_groups, - re.compile(r'^/2/groups/(%s)$' % group_name_pattern): + re.compile(r"^/2/groups/(%s)$" % group_name_pattern): rlib2.R_2_groups_name, - re.compile(r'^/2/groups/(%s)/modify$' % group_name_pattern): + re.compile(r"^/2/groups/(%s)/modify$" % group_name_pattern): rlib2.R_2_groups_name_modify, - re.compile(r'^/2/groups/(%s)/rename$' % group_name_pattern): + re.compile(r"^/2/groups/(%s)/rename$" % group_name_pattern): rlib2.R_2_groups_name_rename, - re.compile(r'^/2/groups/(%s)/assign-nodes$' % group_name_pattern): + re.compile(r"^/2/groups/(%s)/assign-nodes$" % group_name_pattern): rlib2.R_2_groups_name_assign_nodes, - re.compile(r'^/2/groups/(%s)/tags$' % group_name_pattern): + re.compile(r"^/2/groups/(%s)/tags$" % group_name_pattern): rlib2.R_2_groups_name_tags, "/2/jobs": rlib2.R_2_jobs, diff --git a/lib/rapi/rlib2.py b/lib/rapi/rlib2.py index 37df839e205b010f7cbd6b44984838feae0dc87b..74159489663f5cb5a4e32040b2c81e3cdcfb4395 100644 --- a/lib/rapi/rlib2.py +++ b/lib/rapi/rlib2.py @@ -837,9 +837,9 @@ class R_2_instances_name_reboot(baserlib.R_Generic): """ instance_name = self.items[0] - reboot_type = self.queryargs.get('type', + reboot_type = self.queryargs.get("type", [constants.INSTANCE_REBOOT_HARD])[0] - ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries')) + ignore_secondaries = bool(self._checkIntVariable("ignore_secondaries")) op = opcodes.OpInstanceReboot(instance_name=instance_name, reboot_type=reboot_type, ignore_secondaries=ignore_secondaries, @@ -862,8 +862,8 @@ class R_2_instances_name_startup(baserlib.R_Generic): """ instance_name = self.items[0] - force_startup = bool(self._checkIntVariable('force')) - no_remember = bool(self._checkIntVariable('no_remember')) + force_startup = bool(self._checkIntVariable("force")) + no_remember = bool(self._checkIntVariable("no_remember")) op = opcodes.OpInstanceStartup(instance_name=instance_name, force=force_startup, dry_run=bool(self.dryRun()), @@ -900,7 +900,7 @@ class R_2_instances_name_shutdown(baserlib.R_Generic): """ baserlib.CheckType(self.request_body, dict, "Body contents") - no_remember = bool(self._checkIntVariable('no_remember')) + no_remember = bool(self._checkIntVariable("no_remember")) op = _ParseShutdownInstanceRequest(self.items[0], self.request_body, bool(self.dryRun()), no_remember) @@ -1015,7 +1015,7 @@ class 
R_2_instances_name_activate_disks(baserlib.R_Generic): """ instance_name = self.items[0] - ignore_size = bool(self._checkIntVariable('ignore_size')) + ignore_size = bool(self._checkIntVariable("ignore_size")) op = opcodes.OpInstanceActivateDisks(instance_name=instance_name, ignore_size=ignore_size) @@ -1355,11 +1355,11 @@ class _R_Tags(baserlib.R_Generic): """ # pylint: disable-msg=W0212 - if 'tag' not in self.queryargs: + if "tag" not in self.queryargs: - raise http.HttpBadRequest("Please specify tag(s) to add using the" + raise http.HttpBadRequest("Please specify tag(s) to add using" " the 'tag' parameter") return baserlib._Tags_PUT(self.TAG_LEVEL, - self.queryargs['tag'], name=self.name, + self.queryargs["tag"], name=self.name, dry_run=bool(self.dryRun())) def DELETE(self): @@ -1371,12 +1371,12 @@ class _R_Tags(baserlib.R_Generic): """ # pylint: disable-msg=W0212 - if 'tag' not in self.queryargs: + if "tag" not in self.queryargs: - # no we not gonna delete all tags + # no, we are not going to delete all tags raise http.HttpBadRequest("Cannot delete all tags - please specify" " tag(s) using the 'tag' parameter") return baserlib._Tags_DELETE(self.TAG_LEVEL, - self.queryargs['tag'], + self.queryargs["tag"], name=self.name, dry_run=bool(self.dryRun())) diff --git a/lib/serializer.py b/lib/serializer.py index 9a5f1ce4b5cb370ef38db78513c10dba8c48e94a..81f1add970f237a69ffcf0a156af131884547891 100644 --- a/lib/serializer.py +++ b/lib/serializer.py @@ -38,7 +38,7 @@ from ganeti import utils _JSON_INDENT = 2 -_RE_EOLSP = re.compile('[ \t]+$', re.MULTILINE) +_RE_EOLSP = re.compile("[ \t]+$", re.MULTILINE) def _GetJsonDumpers(_encoder_class=simplejson.JSONEncoder): @@ -79,8 +79,8 @@ def DumpJson(data, indent=True): fn = _DumpJson txt = _RE_EOLSP.sub("", fn(data)) - if not txt.endswith('\n'): - txt += '\n' + if not txt.endswith("\n"): + txt += "\n" return txt @@ -108,10 +108,10 @@ def DumpSignedJson(data, key, salt=None, key_selector=None): """ txt = DumpJson(data, indent=False) if salt is None: - salt = '' + salt = "" signed_dict = { - 'msg': txt, - 'salt': salt, + "msg": txt, + "salt": salt, } if key_selector: @@ -138,13 +138,13 @@ def LoadSignedJson(txt, key): """ signed_dict = LoadJson(txt) if not isinstance(signed_dict, dict): - raise errors.SignatureError('Invalid external message') + raise errors.SignatureError("Invalid external message") try: - msg = signed_dict['msg'] - salt = signed_dict['salt'] - hmac_sign = signed_dict['hmac'] + msg = signed_dict["msg"] + salt = signed_dict["salt"] + hmac_sign = signed_dict["hmac"] except KeyError: - raise errors.SignatureError('Invalid external message') + raise errors.SignatureError("Invalid external message") if callable(key): # pylint: disable-msg=E1103 @@ -159,7 +159,7 @@ def LoadSignedJson(txt, key): if not utils.VerifySha1Hmac(hmac_key, msg, hmac_sign, salt=salt + key_selector): - raise errors.SignatureError('Invalid Signature') + raise errors.SignatureError("Invalid Signature") return LoadJson(msg), salt diff --git a/lib/ssconf.py b/lib/ssconf.py index 5498ba361d4fe1daf2c3ad64be7a2b662bedcc2a..c1ef0966e36762e9df964ace06f9e4f6f93cba05 100644 --- a/lib/ssconf.py +++ b/lib/ssconf.py @@ -41,7 +41,7 @@ from ganeti import netutils SSCONF_LOCK_TIMEOUT = 10 -RE_VALID_SSCONF_NAME = re.compile(r'^[-_a-z0-9]+$') +RE_VALID_SSCONF_NAME = re.compile(r"^[-_a-z0-9]+$") class SimpleConfigReader(object): @@ -111,17 +111,17 @@ class SimpleConfigReader(object): self._ip_to_inst_by_link = {} self._instances_ips = [] self._inst_ips_by_link = {} - c_nparams = self._config_data['cluster']['nicparams'][constants.PP_DEFAULT] - for iname in self._config_data['instances']: - instance
= self._config_data['instances'][iname] - for nic in instance['nics']: - if 'ip' in nic and nic['ip']: - params = objects.FillDict(c_nparams, nic['nicparams']) - if not params['link'] in self._inst_ips_by_link: - self._inst_ips_by_link[params['link']] = [] - self._ip_to_inst_by_link[params['link']] = {} - self._ip_to_inst_by_link[params['link']][nic['ip']] = iname - self._inst_ips_by_link[params['link']].append(nic['ip']) + c_nparams = self._config_data["cluster"]["nicparams"][constants.PP_DEFAULT] + for iname in self._config_data["instances"]: + instance = self._config_data["instances"][iname] + for nic in instance["nics"]: + if "ip" in nic and nic["ip"]: + params = objects.FillDict(c_nparams, nic["nicparams"]) + if not params["link"] in self._inst_ips_by_link: + self._inst_ips_by_link[params["link"]] = [] + self._ip_to_inst_by_link[params["link"]] = {} + self._ip_to_inst_by_link[params["link"]][nic["ip"]] = iname + self._inst_ips_by_link[params["link"]].append(nic["ip"]) self._nodes_primary_ips = [] self._mc_primary_ips = [] @@ -310,7 +310,7 @@ class SimpleStore(object): raise errors.ProgrammerError("Invalid key requested from SSConf: '%s'" % str(key)) - filename = self._cfg_dir + '/' + self._SS_FILEPREFIX + key + filename = self._cfg_dir + "/" + self._SS_FILEPREFIX + key return filename def _ReadFile(self, key, default=None): @@ -328,7 +328,7 @@ class SimpleStore(object): return default raise errors.ConfigurationError("Can't read from the ssconf file:" " '%s'" % str(err)) - data = data.rstrip('\n') + data = data.rstrip("\n") return data def WriteFiles(self, values): diff --git a/lib/utils/__init__.py b/lib/utils/__init__.py index cdf6a13e26e855f0fe7aee27aecc5c1097211c09..d0df82173c8df6ea0b7a6a26191aefbe4fa7d4a9 100644 --- a/lib/utils/__init__.py +++ b/lib/utils/__init__.py @@ -57,8 +57,8 @@ from ganeti.utils.x509 import * _VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$") -UUID_RE = re.compile('^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-' - '[a-f0-9]{4}-[a-f0-9]{12}$') +UUID_RE = re.compile("^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-" + "[a-f0-9]{4}-[a-f0-9]{12}$") def ForceDictType(target, key_types, allowed_values=None): @@ -98,7 +98,7 @@ def ForceDictType(target, key_types, allowed_values=None): pass elif not isinstance(target[key], basestring): if isinstance(target[key], bool) and not target[key]: - target[key] = '' + target[key] = "" else: msg = "'%s' (value %s) is not a valid string" % (key, target[key]) raise errors.TypeEnforcementError(msg) @@ -546,15 +546,15 @@ def SignalHandled(signums): """ def wrap(fn): def sig_function(*args, **kwargs): - assert 'signal_handlers' not in kwargs or \ - kwargs['signal_handlers'] is None or \ - isinstance(kwargs['signal_handlers'], dict), \ + assert "signal_handlers" not in kwargs or \ + kwargs["signal_handlers"] is None or \ + isinstance(kwargs["signal_handlers"], dict), \ "Wrong signal_handlers parameter in original function call" - if 'signal_handlers' in kwargs and kwargs['signal_handlers'] is not None: - signal_handlers = kwargs['signal_handlers'] + if "signal_handlers" in kwargs and kwargs["signal_handlers"] is not None: + signal_handlers = kwargs["signal_handlers"] else: signal_handlers = {} - kwargs['signal_handlers'] = signal_handlers + kwargs["signal_handlers"] = signal_handlers sighandler = SignalHandler(signums) try: for sig in signums: diff --git a/lib/utils/io.py b/lib/utils/io.py index 08c99908b656c9d872ddd856a14cd29d397993cb..4c1df6c17b02b5d183c2e35c6e07b9870226875f 100644 --- a/lib/utils/io.py +++ b/lib/utils/io.py @@ -362,10 
+362,10 @@ def CreateBackup(file_name): (os.path.basename(file_name), TimestampForFilename())) dir_name = os.path.dirname(file_name) - fsrc = open(file_name, 'rb') + fsrc = open(file_name, "rb") try: (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name) - fdst = os.fdopen(fd, 'wb') + fdst = os.fdopen(fd, "wb") try: logging.debug("Backing up %s at %s", file_name, backup_name) shutil.copyfileobj(fsrc, fdst) @@ -632,7 +632,7 @@ def AddAuthorizedKey(file_obj, key): key_fields = key.split() if isinstance(file_obj, basestring): - f = open(file_obj, 'a+') + f = open(file_obj, "a+") else: f = file_obj @@ -642,11 +642,11 @@ def AddAuthorizedKey(file_obj, key): # Ignore whitespace changes if line.split() == key_fields: break - nl = line.endswith('\n') + nl = line.endswith("\n") else: if not nl: f.write("\n") - f.write(key.rstrip('\r\n')) + f.write(key.rstrip("\r\n")) f.write("\n") f.flush() finally: @@ -666,9 +666,9 @@ def RemoveAuthorizedKey(file_name, key): fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name)) try: - out = os.fdopen(fd, 'w') + out = os.fdopen(fd, "w") try: - f = open(file_name, 'r') + f = open(file_name, "r") try: for line in f: # Ignore whitespace changes while comparing lines diff --git a/lib/utils/text.py b/lib/utils/text.py index d8e0984a3ae001b15f7b5007e1d52b031b77852d..bfce363d5a8704cef4632e3869190511d1c009e5 100644 --- a/lib/utils/text.py +++ b/lib/utils/text.py @@ -35,7 +35,7 @@ from ganeti import errors _PARSEUNIT_REGEX = re.compile(r"^([.\d]+)\s*([a-zA-Z]+)?$") #: Characters which don't need to be quoted for shell commands -_SHELL_UNQUOTED_RE = re.compile('^[-.,=:/_+@A-Za-z0-9]+$') +_SHELL_UNQUOTED_RE = re.compile("^[-.,=:/_+@A-Za-z0-9]+$") #: MAC checker regexp _MAC_CHECK_RE = re.compile("^([0-9a-f]{2}:){5}[0-9a-f]{2}$", re.I) @@ -143,24 +143,24 @@ def FormatUnit(value, units): @return: the formatted value (with suffix) """ - if units not in ('m', 'g', 't', 'h'): + if units not in ("m", "g", "t", "h"): raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units)) - suffix = '' + suffix = "" - if units == 'm' or (units == 'h' and value < 1024): - if units == 'h': - suffix = 'M' + if units == "m" or (units == "h" and value < 1024): + if units == "h": + suffix = "M" return "%d%s" % (round(value, 0), suffix) - elif units == 'g' or (units == 'h' and value < (1024 * 1024)): - if units == 'h': - suffix = 'G' + elif units == "g" or (units == "h" and value < (1024 * 1024)): + if units == "h": + suffix = "G" return "%0.1f%s" % (round(float(value) / 1024, 1), suffix) else: - if units == 'h': - suffix = 'T' + if units == "h": + suffix = "T" return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix) @@ -182,16 +182,16 @@ def ParseUnit(input_string): if unit: lcunit = unit.lower() else: - lcunit = 'm' + lcunit = "m" - if lcunit in ('m', 'mb', 'mib'): + if lcunit in ("m", "mb", "mib"): # Value already in MiB pass - elif lcunit in ('g', 'gb', 'gib'): + elif lcunit in ("g", "gb", "gib"): value *= 1024 - elif lcunit in ('t', 'tb', 'tib'): + elif lcunit in ("t", "tb", "tib"): value *= 1024 * 1024 else: @@ -334,15 +334,15 @@ def SafeEncode(text): """ if isinstance(text, unicode): # only if unicode; if str already, we handle it below - text = text.encode('ascii', 'backslashreplace') + text = text.encode("ascii", "backslashreplace") resu = "" for char in text: c = ord(char) - if char == '\t': - resu += r'\t' - elif char == '\n': - resu += r'\n' - elif char == '\r': + if char == "\t": + resu += r"\t" + elif char == "\n": + resu += r"\n" + elif char == 
"\r": resu += r'\'r' elif c < 32 or c >= 127: # non-printable resu += "\\x%02x" % (c & 0xff) diff --git a/qa/ganeti-qa.py b/qa/ganeti-qa.py index 8b3d8f93f532888ce6dd7e2b4596c3e68cfd8d94..6fa83c72727bdc0da2e4c0c89f6e18c0326d1f42 100755 --- a/qa/ganeti-qa.py +++ b/qa/ganeti-qa.py @@ -245,7 +245,7 @@ def RunCommonInstanceTests(instance): RunTestIf("instance-reboot", qa_instance.TestInstanceReboot, instance) - if qa_config.TestEnabled('instance-rename'): + if qa_config.TestEnabled("instance-rename"): rename_source = instance["name"] rename_target = qa_config.get("rename", None) RunTest(qa_instance.TestInstanceShutdown, instance) @@ -310,7 +310,7 @@ def RunExportImportTests(instance, pnode, snode): otherwise None """ - if qa_config.TestEnabled('instance-export'): + if qa_config.TestEnabled("instance-export"): RunTest(qa_instance.TestInstanceExportNoTarget, instance) expnode = qa_config.AcquireNode(exclude=pnode) @@ -319,7 +319,7 @@ def RunExportImportTests(instance, pnode, snode): RunTest(qa_instance.TestBackupList, expnode) - if qa_config.TestEnabled('instance-import'): + if qa_config.TestEnabled("instance-import"): newinst = qa_config.AcquireInstance() try: RunTest(qa_instance.TestInstanceImport, pnode, newinst, @@ -373,7 +373,7 @@ def RunHardwareFailureTests(instance, pnode, snode): RunTestIf(["instance-migrate", "rapi"], qa_rapi.TestRapiInstanceMigrate, instance) - if qa_config.TestEnabled('instance-replace-disks'): + if qa_config.TestEnabled("instance-replace-disks"): othernode = qa_config.AcquireNode(exclude=[pnode, snode]) try: RunTest(qa_instance.TestReplaceDisks, @@ -448,7 +448,7 @@ def RunQa(): del instance multinode_tests = [ - ('instance-add-drbd-disk', + ("instance-add-drbd-disk", qa_instance.TestInstanceAddWithDrbdDisk), ] @@ -462,7 +462,7 @@ def RunQa(): RunTest(qa_group.TestAssignNodesIncludingSplit, constants.INITIAL_NODE_GROUP_NAME, pnode["primary"], snode["primary"]) - if qa_config.TestEnabled('instance-convert-disk'): + if qa_config.TestEnabled("instance-convert-disk"): RunTest(qa_instance.TestInstanceShutdown, instance) RunTest(qa_instance.TestInstanceConvertDisk, instance, snode) RunTest(qa_instance.TestInstanceStartup, instance) @@ -503,7 +503,7 @@ def main(): """ parser = optparse.OptionParser(usage="%prog [options] <config-file>") - parser.add_option('--yes-do-it', dest='yes_do_it', + parser.add_option("--yes-do-it", dest="yes_do_it", action="store_true", help="Really execute the tests") (qa_config.options, args) = parser.parse_args() @@ -527,5 +527,5 @@ def main(): finally: qa_utils.CloseMultiplexers() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/qa/qa_cluster.py b/qa/qa_cluster.py index cca7133a7fe3500b21caba379958573332c75cd5..ee12557bff42041813b7565445697c219a17205b 100644 --- a/qa/qa_cluster.py +++ b/qa/qa_cluster.py @@ -76,39 +76,39 @@ def TestClusterInit(rapi_user, rapi_secret): fh.close() # Initialize cluster - cmd = ['gnt-cluster', 'init'] + cmd = ["gnt-cluster", "init"] cmd.append("--primary-ip-version=%d" % qa_config.get("primary_ip_version", 4)) - if master.get('secondary', None): - cmd.append('--secondary-ip=%s' % master['secondary']) + if master.get("secondary", None): + cmd.append("--secondary-ip=%s" % master["secondary"]) - bridge = qa_config.get('bridge', None) + bridge = qa_config.get("bridge", None) if bridge: - cmd.append('--bridge=%s' % bridge) - cmd.append('--master-netdev=%s' % bridge) + cmd.append("--bridge=%s" % bridge) + cmd.append("--master-netdev=%s" % bridge) - htype = qa_config.get('enabled-hypervisors', 
None) + htype = qa_config.get("enabled-hypervisors", None) if htype: - cmd.append('--enabled-hypervisors=%s' % htype) + cmd.append("--enabled-hypervisors=%s" % htype) - cmd.append(qa_config.get('name')) + cmd.append(qa_config.get("name")) AssertCommand(cmd) def TestClusterRename(): """gnt-cluster rename""" - cmd = ['gnt-cluster', 'rename', '-f'] + cmd = ["gnt-cluster", "rename", "-f"] - original_name = qa_config.get('name') - rename_target = qa_config.get('rename', None) + original_name = qa_config.get("name") + rename_target = qa_config.get("rename", None) if rename_target is None: print qa_utils.FormatError('"rename" entry is missing') return - cmd_verify = ['gnt-cluster', 'verify'] + cmd_verify = ["gnt-cluster", "verify"] for data in [ cmd + [rename_target], @@ -332,19 +332,19 @@ def TestClusterBurnin(): """Burnin""" master = qa_config.GetMasterNode() - options = qa_config.get('options', {}) - disk_template = options.get('burnin-disk-template', 'drbd') - parallel = options.get('burnin-in-parallel', False) - check_inst = options.get('burnin-check-instances', False) - do_rename = options.get('burnin-rename', '') - do_reboot = options.get('burnin-reboot', True) + options = qa_config.get("options", {}) + disk_template = options.get("burnin-disk-template", "drbd") + parallel = options.get("burnin-in-parallel", False) + check_inst = options.get("burnin-check-instances", False) + do_rename = options.get("burnin-rename", "") + do_reboot = options.get("burnin-reboot", True) reboot_types = options.get("reboot-types", constants.REBOOT_TYPES) # Get as many instances as we need instances = [] try: try: - num = qa_config.get('options', {}).get('burnin-instances', 1) + num = qa_config.get("options", {}).get("burnin-instances", 1) for _ in range(0, num): instances.append(qa_config.AcquireInstance()) except qa_error.OutOfInstancesError: @@ -353,26 +353,26 @@ def TestClusterBurnin(): if len(instances) < 1: raise qa_error.Error("Burnin needs at least one instance") - script = qa_utils.UploadFile(master['primary'], '../tools/burnin') + script = qa_utils.UploadFile(master["primary"], "../tools/burnin") try: # Run burnin cmd = [script, - '--os=%s' % qa_config.get('os'), - '--disk-size=%s' % ",".join(qa_config.get('disk')), - '--disk-growth=%s' % ",".join(qa_config.get('disk-growth')), - '--disk-template=%s' % disk_template] + "--os=%s" % qa_config.get("os"), + "--disk-size=%s" % ",".join(qa_config.get("disk")), + "--disk-growth=%s" % ",".join(qa_config.get("disk-growth")), + "--disk-template=%s" % disk_template] if parallel: - cmd.append('--parallel') - cmd.append('--early-release') + cmd.append("--parallel") + cmd.append("--early-release") if check_inst: - cmd.append('--http-check') + cmd.append("--http-check") if do_rename: - cmd.append('--rename=%s' % do_rename) + cmd.append("--rename=%s" % do_rename) if not do_reboot: - cmd.append('--no-reboot') + cmd.append("--no-reboot") else: - cmd.append('--reboot-types=%s' % ",".join(reboot_types)) - cmd += [inst['name'] for inst in instances] + cmd.append("--reboot-types=%s" % ",".join(reboot_types)) + cmd += [inst["name"] for inst in instances] AssertCommand(cmd) finally: AssertCommand(["rm", "-f", script]) @@ -439,7 +439,7 @@ def TestClusterCopyfile(): f.seek(0) # Upload file to master node - testname = qa_utils.UploadFile(master['primary'], f.name) + testname = qa_utils.UploadFile(master["primary"], f.name) try: # Copy file to all nodes AssertCommand(["gnt-cluster", "copyfile", testname]) @@ -452,8 +452,8 @@ def TestClusterCommand(): """gnt-cluster command""" 
uniqueid = utils.NewUUID() rfile = "/tmp/gnt%s" % utils.NewUUID() - rcmd = utils.ShellQuoteArgs(['echo', '-n', uniqueid]) - cmd = utils.ShellQuoteArgs(['gnt-cluster', 'command', + rcmd = utils.ShellQuoteArgs(["echo", "-n", uniqueid]) + cmd = utils.ShellQuoteArgs(["gnt-cluster", "command", "%s >%s" % (rcmd, rfile)]) try: diff --git a/qa/qa_config.py b/qa/qa_config.py index 62b0a077869803e481c77a2d7540830a835d9a7b..29c6e6955729a02130f5a798d215d700511d4bb0 100644 --- a/qa/qa_config.py +++ b/qa/qa_config.py @@ -47,9 +47,9 @@ def Load(path): def Validate(): - if len(cfg['nodes']) < 1: + if len(cfg["nodes"]) < 1: raise qa_error.Error("Need at least one node") - if len(cfg['instances']) < 1: + if len(cfg["instances"]) < 1: raise qa_error.Error("Need at least one instance") if len(cfg["disk"]) != len(cfg["disk-growth"]): raise qa_error.Error("Config options 'disk' and 'disk-growth' must have" @@ -72,7 +72,7 @@ def TestEnabled(tests): def GetMasterNode(): - return cfg['nodes'][0] + return cfg["nodes"][0] def AcquireInstance(): @@ -80,20 +80,20 @@ def AcquireInstance(): """ # Filter out unwanted instances - tmp_flt = lambda inst: not inst.get('_used', False) - instances = filter(tmp_flt, cfg['instances']) + tmp_flt = lambda inst: not inst.get("_used", False) + instances = filter(tmp_flt, cfg["instances"]) del tmp_flt if len(instances) == 0: raise qa_error.OutOfInstancesError("No instances left") inst = instances[0] - inst['_used'] = True + inst["_used"] = True return inst def ReleaseInstance(inst): - inst['_used'] = False + inst["_used"] = False def AcquireNode(exclude=None): @@ -105,13 +105,13 @@ def AcquireNode(exclude=None): # Filter out unwanted nodes # TODO: Maybe combine filters if exclude is None: - nodes = cfg['nodes'][:] + nodes = cfg["nodes"][:] elif isinstance(exclude, (list, tuple)): - nodes = filter(lambda node: node not in exclude, cfg['nodes']) + nodes = filter(lambda node: node not in exclude, cfg["nodes"]) else: - nodes = filter(lambda node: node != exclude, cfg['nodes']) + nodes = filter(lambda node: node != exclude, cfg["nodes"]) - tmp_flt = lambda node: node.get('_added', False) or node == master + tmp_flt = lambda node: node.get("_added", False) or node == master nodes = filter(tmp_flt, nodes) del tmp_flt @@ -120,17 +120,17 @@ def AcquireNode(exclude=None): # Get node with least number of uses def compare(a, b): - result = cmp(a.get('_count', 0), b.get('_count', 0)) + result = cmp(a.get("_count", 0), b.get("_count", 0)) if result == 0: - result = cmp(a['primary'], b['primary']) + result = cmp(a["primary"], b["primary"]) return result nodes.sort(cmp=compare) node = nodes[0] - node['_count'] = node.get('_count', 0) + 1 + node["_count"] = node.get("_count", 0) + 1 return node def ReleaseNode(node): - node['_count'] = node.get('_count', 0) - 1 + node["_count"] = node.get("_count", 0) - 1 diff --git a/qa/qa_env.py b/qa/qa_env.py index 211b38679d37562a2527fa233151491521cf288f..e9293b0765c11cbe8aa16c27ec2e17e327436813 100644 --- a/qa/qa_env.py +++ b/qa/qa_env.py @@ -42,23 +42,23 @@ def TestGanetiCommands(): - """Test availibility of Ganeti commands. + """Test availability of Ganeti commands.
""" - cmds = ( ['gnt-backup', '--version'], - ['gnt-cluster', '--version'], - ['gnt-debug', '--version'], - ['gnt-instance', '--version'], - ['gnt-job', '--version'], - ['gnt-node', '--version'], - ['gnt-os', '--version'], - ['ganeti-masterd', '--version'], - ['ganeti-noded', '--version'], - ['ganeti-rapi', '--version'], - ['ganeti-watcher', '--version'], - ['ganeti-confd', '--version'], + cmds = ( ["gnt-backup", "--version"], + ["gnt-cluster", "--version"], + ["gnt-debug", "--version"], + ["gnt-instance", "--version"], + ["gnt-job", "--version"], + ["gnt-node", "--version"], + ["gnt-os", "--version"], + ["ganeti-masterd", "--version"], + ["ganeti-noded", "--version"], + ["ganeti-rapi", "--version"], + ["ganeti-watcher", "--version"], + ["ganeti-confd", "--version"], ) - cmd = ' && '.join([utils.ShellQuoteArgs(i) for i in cmds]) + cmd = " && ".join([utils.ShellQuoteArgs(i) for i in cmds]) - for node in qa_config.get('nodes'): + for node in qa_config.get("nodes"): AssertCommand(cmd, node=node) @@ -66,7 +66,7 @@ def TestIcmpPing(): """ICMP ping each node. """ - nodes = qa_config.get('nodes') + nodes = qa_config.get("nodes") pingprimary = pingsecondary = "fping" if qa_config.get("primary_ip_version") == 6: diff --git a/qa/qa_instance.py b/qa/qa_instance.py index ab2a0f4a8989dad69dd9aee4179d5df83662080c..4747839eb0ba9d00803496d6e49152e4d4922d0e 100644 --- a/qa/qa_instance.py +++ b/qa/qa_instance.py @@ -42,8 +42,8 @@ def _GetDiskStatePath(disk): def _GetGenericAddParameters(): - params = ['-B', '%s=%s' % (constants.BE_MEMORY, qa_config.get('mem'))] - for idx, size in enumerate(qa_config.get('disk')): + params = ["-B", "%s=%s" % (constants.BE_MEMORY, qa_config.get("mem"))] + for idx, size in enumerate(qa_config.get("disk")): params.extend(["--disk", "%s:size=%s" % (idx, size)]) return params @@ -51,12 +51,12 @@ def _GetGenericAddParameters(): def _DiskTest(node, disk_template): instance = qa_config.AcquireInstance() try: - cmd = (['gnt-instance', 'add', - '--os-type=%s' % qa_config.get('os'), - '--disk-template=%s' % disk_template, - '--node=%s' % node] + + cmd = (["gnt-instance", "add", + "--os-type=%s" % qa_config.get("os"), + "--disk-template=%s" % disk_template, + "--node=%s" % node] + _GetGenericAddParameters()) - cmd.append(instance['name']) + cmd.append(instance["name"]) AssertCommand(cmd) @@ -70,13 +70,13 @@ def _DiskTest(node, disk_template): def TestInstanceAddWithPlainDisk(node): """gnt-instance add -t plain""" - return _DiskTest(node['primary'], 'plain') + return _DiskTest(node["primary"], "plain") def TestInstanceAddWithDrbdDisk(node, node2): """gnt-instance add -t drbd""" - return _DiskTest("%s:%s" % (node['primary'], node2['primary']), - 'drbd') + return _DiskTest("%s:%s" % (node["primary"], node2["primary"]), + "drbd") def TestInstanceRemove(instance): @@ -98,7 +98,7 @@ def TestInstanceShutdown(instance): def TestInstanceReboot(instance): """gnt-instance reboot""" - options = qa_config.get('options', {}) + options = qa_config.get("options", {}) reboot_types = options.get("reboot-types", constants.REBOOT_TYPES) name = instance["name"] for rtype in reboot_types: @@ -163,7 +163,7 @@ def TestInstanceRename(rename_source, rename_target): def TestInstanceFailover(instance): """gnt-instance failover""" - cmd = ['gnt-instance', 'failover', '--force', instance['name']] + cmd = ["gnt-instance", "failover", "--force", instance["name"]] # failover ... AssertCommand(cmd) # ... 
and back @@ -196,7 +196,7 @@ def TestInstanceModify(instance): test_kernel = "/sbin/init" test_initrd = test_kernel - orig_memory = qa_config.get('mem') + orig_memory = qa_config.get("mem") #orig_bridge = qa_config.get("bridge", "xen-br0") args = [ ["-B", "%s=128" % constants.BE_MEMORY], @@ -255,7 +255,7 @@ def TestReplaceDisks(instance, pnode, snode, othernode): # due to unused pnode arg # FIXME: should be removed from the function completely def buildcmd(args): - cmd = ['gnt-instance', 'replace-disks'] + cmd = ["gnt-instance", "replace-disks"] cmd.extend(args) cmd.append(instance["name"]) return cmd @@ -297,15 +297,15 @@ def TestInstanceExportNoTarget(instance): def TestInstanceImport(node, newinst, expnode, name): """gnt-backup import""" - cmd = (['gnt-backup', 'import', - '--disk-template=plain', - '--no-ip-check', - '--net', '0:mac=generate', - '--src-node=%s' % expnode['primary'], - '--src-dir=%s/%s' % (constants.EXPORT_DIR, name), - '--node=%s' % node['primary']] + + cmd = (["gnt-backup", "import", + "--disk-template=plain", + "--no-ip-check", + "--net", "0:mac=generate", + "--src-node=%s" % expnode["primary"], + "--src-dir=%s/%s" % (constants.EXPORT_DIR, name), + "--node=%s" % node["primary"]] + _GetGenericAddParameters()) - cmd.append(newinst['name']) + cmd.append(newinst["name"]) AssertCommand(cmd) @@ -324,16 +324,16 @@ def _TestInstanceDiskFailure(instance, node, node2, onmaster): node2_full = qa_utils.ResolveNodeName(node2) print qa_utils.FormatInfo("Getting physical disk names") - cmd = ['gnt-node', 'volumes', '--separator=|', '--no-headers', - '--output=node,phys,instance', - node['primary'], node2['primary']] - output = qa_utils.GetCommandOutput(master['primary'], sq(cmd)) + cmd = ["gnt-node", "volumes", "--separator=|", "--no-headers", + "--output=node,phys,instance", + node["primary"], node2["primary"]] + output = qa_utils.GetCommandOutput(master["primary"], sq(cmd)) # Get physical disk names - re_disk = re.compile(r'^/dev/([a-z]+)\d+$') + re_disk = re.compile(r"^/dev/([a-z]+)\d+$") node2disk = {} for line in output.splitlines(): - (node_name, phys, inst) = line.split('|') + (node_name, phys, inst) = line.split("|") if inst == instance_full: if node_name not in node2disk: node2disk[node_name] = [] @@ -359,19 +359,19 @@ def _TestInstanceDiskFailure(instance, node, node2, onmaster): AssertCommand(" && ".join(cmds), node=node_name) print qa_utils.FormatInfo("Getting device paths") - cmd = ['gnt-instance', 'activate-disks', instance['name']] - output = qa_utils.GetCommandOutput(master['primary'], sq(cmd)) + cmd = ["gnt-instance", "activate-disks", instance["name"]] + output = qa_utils.GetCommandOutput(master["primary"], sq(cmd)) devpath = [] for line in output.splitlines(): - (_, _, tmpdevpath) = line.split(':') + (_, _, tmpdevpath) = line.split(":") devpath.append(tmpdevpath) print devpath print qa_utils.FormatInfo("Getting drbd device paths") - cmd = ['gnt-instance', 'info', instance['name']] - output = qa_utils.GetCommandOutput(master['primary'], sq(cmd)) - pattern = (r'\s+-\s+sd[a-z]+,\s+type:\s+drbd8?,\s+.*$' - r'\s+primary:\s+(/dev/drbd\d+)\s+') + cmd = ["gnt-instance", "info", instance["name"]] + output = qa_utils.GetCommandOutput(master["primary"], sq(cmd)) + pattern = (r"\s+-\s+sd[a-z]+,\s+type:\s+drbd8?,\s+.*$" + r"\s+primary:\s+(/dev/drbd\d+)\s+") drbddevs = re.findall(pattern, output, re.M) print drbddevs diff --git a/qa/qa_node.py b/qa/qa_node.py index 6859cb980f745a5ce4e54326f35c832defedd456..434de3832386f58766eb8a96844a043a72d92e67 100644 --- a/qa/qa_node.py +++ 
b/qa/qa_node.py @@ -36,32 +36,32 @@ from qa_utils import AssertCommand, AssertEqual def _NodeAdd(node, readd=False): - if not readd and node.get('_added', False): - raise qa_error.Error("Node %s already in cluster" % node['primary']) - elif readd and not node.get('_added', False): - raise qa_error.Error("Node %s not yet in cluster" % node['primary']) - - cmd = ['gnt-node', 'add', "--no-ssh-key-check"] - if node.get('secondary', None): - cmd.append('--secondary-ip=%s' % node['secondary']) + if not readd and node.get("_added", False): + raise qa_error.Error("Node %s already in cluster" % node["primary"]) + elif readd and not node.get("_added", False): + raise qa_error.Error("Node %s not yet in cluster" % node["primary"]) + + cmd = ["gnt-node", "add", "--no-ssh-key-check"] + if node.get("secondary", None): + cmd.append("--secondary-ip=%s" % node["secondary"]) if readd: - cmd.append('--readd') - cmd.append(node['primary']) + cmd.append("--readd") + cmd.append(node["primary"]) AssertCommand(cmd) - node['_added'] = True + node["_added"] = True def _NodeRemove(node): AssertCommand(["gnt-node", "remove", node["primary"]]) - node['_added'] = False + node["_added"] = False def TestNodeAddAll(): """Adding all nodes to cluster.""" master = qa_config.GetMasterNode() - for node in qa_config.get('nodes'): + for node in qa_config.get("nodes"): if node != master: _NodeAdd(node, readd=False) @@ -73,15 +73,15 @@ def MarkNodeAddedAll(): """ master = qa_config.GetMasterNode() - for node in qa_config.get('nodes'): + for node in qa_config.get("nodes"): if node != master: - node['_added'] = True + node["_added"] = True def TestNodeRemoveAll(): """Removing all nodes from cluster.""" master = qa_config.GetMasterNode() - for node in qa_config.get('nodes'): + for node in qa_config.get("nodes"): if node != master: _NodeRemove(node) diff --git a/qa/qa_os.py b/qa/qa_os.py index b050c16917a1525d5999858b11dff169bfb90e54..5868a392e30e36fb9702649c4e552f34f83f5c06 100644 --- a/qa/qa_os.py +++ b/qa/qa_os.py @@ -56,14 +56,14 @@ def TestOsDiagnose(): def _TestOsModify(hvp_dict, fail=False): """gnt-os modify""" - cmd = ['gnt-os', 'modify'] + cmd = ["gnt-os", "modify"] for hv_name, hv_params in hvp_dict.items(): - cmd.append('-H') + cmd.append("-H") options = [] for key, value in hv_params.items(): options.append("%s=%s" % (key, value)) - cmd.append('%s:%s' % (hv_name, ','.join(options))) + cmd.append("%s:%s" % (hv_name, ",".join(options))) cmd.append(_TEMP_OS_NAME) AssertCommand(cmd, fail=fail) @@ -99,7 +99,7 @@ def _SetupTempOs(node, dirname, valid): parts.append(sq(["echo", str(constants.OS_API_V10)]) + " >ganeti_api_version") - cmd = ' && '.join(parts) + cmd = " && ".join(parts) print qa_utils.FormatInfo("Setting up %s with %s OS definition" % (node["primary"], diff --git a/qa/qa_rapi.py b/qa/qa_rapi.py index e06d5e8de372b0f9d4e9f4768ee7cc27060c8110..407279efcce15321864d637e03d2c2f912d7d8ed 100644 --- a/qa/qa_rapi.py +++ b/qa/qa_rapi.py @@ -118,7 +118,7 @@ def Enabled(): """Return whether remote API tests should be run. 
""" - return qa_config.TestEnabled('rapi') + return qa_config.TestEnabled("rapi") def _DoTests(uris): @@ -144,7 +144,7 @@ def _DoTests(uris): def _VerifyReturnsJob(data): - AssertMatch(data, r'^\d+$') + AssertMatch(data, r"^\d+$") def TestVersion(): @@ -152,7 +152,7 @@ def TestVersion(): """ _DoTests([ - ("/version", constants.RAPI_VERSION, 'GET', None), + ("/version", constants.RAPI_VERSION, "GET", None), ]) @@ -193,16 +193,16 @@ def TestEmptyCluster(): AssertIn(field, group) _DoTests([ - ("/", None, 'GET', None), - ("/2/info", _VerifyInfo, 'GET', None), - ("/2/tags", None, 'GET', None), - ("/2/nodes", _VerifyNodes, 'GET', None), - ("/2/nodes?bulk=1", _VerifyNodesBulk, 'GET', None), - ("/2/groups", _VerifyGroups, 'GET', None), - ("/2/groups?bulk=1", _VerifyGroupsBulk, 'GET', None), - ("/2/instances", [], 'GET', None), - ("/2/instances?bulk=1", [], 'GET', None), - ("/2/os", None, 'GET', None), + ("/", None, "GET", None), + ("/2/info", _VerifyInfo, "GET", None), + ("/2/tags", None, "GET", None), + ("/2/nodes", _VerifyNodes, "GET", None), + ("/2/nodes?bulk=1", _VerifyNodesBulk, "GET", None), + ("/2/groups", _VerifyGroups, "GET", None), + ("/2/groups?bulk=1", _VerifyGroupsBulk, "GET", None), + ("/2/instances", [], "GET", None), + ("/2/instances?bulk=1", [], "GET", None), + ("/2/os", None, "GET", None), ]) # Test HTTP Not Found @@ -358,13 +358,13 @@ def TestInstance(instance): _VerifyInstance(instance_data) _DoTests([ - ("/2/instances/%s" % instance["name"], _VerifyInstance, 'GET', None), - ("/2/instances", _VerifyInstancesList, 'GET', None), - ("/2/instances?bulk=1", _VerifyInstancesBulk, 'GET', None), + ("/2/instances/%s" % instance["name"], _VerifyInstance, "GET", None), + ("/2/instances", _VerifyInstancesList, "GET", None), + ("/2/instances?bulk=1", _VerifyInstancesBulk, "GET", None), ("/2/instances/%s/activate-disks" % instance["name"], - _VerifyReturnsJob, 'PUT', None), + _VerifyReturnsJob, "PUT", None), ("/2/instances/%s/deactivate-disks" % instance["name"], - _VerifyReturnsJob, 'PUT', None), + _VerifyReturnsJob, "PUT", None), ]) # Test OpBackupPrepare @@ -399,9 +399,9 @@ def TestNode(node): _VerifyNode(node_data) _DoTests([ - ("/2/nodes/%s" % node["primary"], _VerifyNode, 'GET', None), - ("/2/nodes", _VerifyNodesList, 'GET', None), - ("/2/nodes?bulk=1", _VerifyNodesBulk, 'GET', None), + ("/2/nodes/%s" % node["primary"], _VerifyNode, "GET", None), + ("/2/nodes", _VerifyNodesList, "GET", None), + ("/2/nodes?bulk=1", _VerifyNodesBulk, "GET", None), ]) @@ -433,7 +433,7 @@ def TestTags(kind, name, tags): # Retrieve tags _DoTests([ - (uri, _VerifyTags, 'GET', None), + (uri, _VerifyTags, "GET", None), ]) # Remove tags diff --git a/qa/qa_utils.py b/qa/qa_utils.py index 1beb77fa5356c37f8d949712111c7d023acd0f17..441bc8768ea0e9f34ac7deac5ffb2ffb642c6ab8 100644 --- a/qa/qa_utils.py +++ b/qa/qa_utils.py @@ -82,7 +82,7 @@ def AssertIn(item, sequence): """ if item not in sequence: - raise qa_error.Error('%r not in %r' % (item, sequence)) + raise qa_error.Error("%r not in %r" % (item, sequence)) def AssertNotIn(item, sequence): @@ -90,7 +90,7 @@ def AssertNotIn(item, sequence): """ if item in sequence: - raise qa_error.Error('%r in %r' % (item, sequence)) + raise qa_error.Error("%r in %r" % (item, sequence)) def AssertEqual(first, second): @@ -98,7 +98,7 @@ def AssertEqual(first, second): """ if not first == second: - raise qa_error.Error('%r == %r' % (first, second)) + raise qa_error.Error("%r == %r" % (first, second)) def AssertNotEqual(first, second): @@ -106,7 +106,7 @@ def AssertNotEqual(first, 
second): """ if not first != second: - raise qa_error.Error('%r != %r' % (first, second)) + raise qa_error.Error("%r != %r" % (first, second)) def AssertMatch(string, pattern): @@ -178,18 +178,18 @@ def GetSSHCommand(node, cmd, strict=True, opts=None, tty=True): args.append("-t") if strict: - tmp = 'yes' + tmp = "yes" else: - tmp = 'no' - args.append('-oStrictHostKeyChecking=%s' % tmp) - args.append('-oClearAllForwardings=yes') - args.append('-oForwardAgent=yes') + tmp = "no" + args.append("-oStrictHostKeyChecking=%s" % tmp) + args.append("-oClearAllForwardings=yes") + args.append("-oForwardAgent=yes") if opts: args.extend(opts) if node in _MULTIPLEXERS: spath = _MULTIPLEXERS[node][0] - args.append('-oControlPath=%s' % spath) - args.append('-oControlMaster=no') + args.append("-oControlPath=%s" % spath) + args.append("-oControlMaster=no") args.append(node) if cmd: args.append(cmd) @@ -265,7 +265,7 @@ def UploadFile(node, src): 'cat > "${tmp}" && ' 'echo "${tmp}"') % mode - f = open(src, 'r') + f = open(src, "r") try: p = subprocess.Popen(GetSSHCommand(node, cmd), shell=False, stdin=f, stdout=subprocess.PIPE) @@ -325,9 +325,9 @@ def _ResolveName(cmd, key): """ master = qa_config.GetMasterNode() - output = GetCommandOutput(master['primary'], utils.ShellQuoteArgs(cmd)) + output = GetCommandOutput(master["primary"], utils.ShellQuoteArgs(cmd)) for line in output.splitlines(): - (lkey, lvalue) = line.split(':', 1) + (lkey, lvalue) = line.split(":", 1) if lkey == key: return lvalue.lstrip() raise KeyError("Key not found") @@ -340,16 +340,16 @@ def ResolveInstanceName(instance): @param instance: Instance name """ - return _ResolveName(['gnt-instance', 'info', instance], - 'Instance name') + return _ResolveName(["gnt-instance", "info", instance], + "Instance name") def ResolveNodeName(node): """Gets the full name of a node. """ - return _ResolveName(['gnt-node', 'info', node['primary']], - 'Node name') + return _ResolveName(["gnt-node", "info", node["primary"]], + "Node name") def GetNodeInstances(node, secondaries=False): @@ -360,15 +360,15 @@ def GetNodeInstances(node, secondaries=False): node_name = ResolveNodeName(node) # Get list of all instances - cmd = ['gnt-instance', 'list', '--separator=:', '--no-headers', - '--output=name,pnode,snodes'] - output = GetCommandOutput(master['primary'], utils.ShellQuoteArgs(cmd)) + cmd = ["gnt-instance", "list", "--separator=:", "--no-headers", + "--output=name,pnode,snodes"] + output = GetCommandOutput(master["primary"], utils.ShellQuoteArgs(cmd)) instances = [] for line in output.splitlines(): - (name, pnode, snodes) = line.split(':', 2) + (name, pnode, snodes) = line.split(":", 2) if ((not secondaries and pnode == node_name) or - (secondaries and node_name in snodes.split(','))): + (secondaries and node_name in snodes.split(","))): instances.append(name) return instances