Commit d0c8c01d authored by Iustin Pop

Most boring patch ever

s/'/"/ in (hopefully) the right places.
Signed-off-by: Iustin Pop <iustin@google.com>
Reviewed-by: Michael Hanselmann <hansmi@google.com>
parent 0fa044e7
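
The change itself is purely mechanical, so it lends itself to automated double-checking. Below is a minimal sketch (not part of this commit; the script name and helper are made up) of how leftover single-quoted string literals could be listed with Python 3's tokenize module:

# find_single_quotes.py -- hypothetical helper, not part of this commit.
# Lists string literals that still use single quotes, so a quote-style
# sweep like this one can be double-checked (requires Python 3).
import sys
import tokenize

def iter_single_quoted(path):
  """Yield (line, column, literal) for single-quoted string tokens in path."""
  with open(path, "rb") as handle:
    for tok in tokenize.tokenize(handle.readline):
      if tok.type != tokenize.STRING:
        continue
      literal = tok.string.lstrip("rbuRBU")  # skip any string prefix characters
      if literal.startswith("'") and not literal.startswith("'''"):
        yield tok.start[0], tok.start[1], tok.string

if __name__ == "__main__":
  for filename in sys.argv[1:]:
    for line, col, literal in iter_single_quoted(filename):
      print("%s:%d:%d: %s" % (filename, line, col, literal))

Running it as "python3 find_single_quotes.py path/to/module.py" prints the location of any literal the substitution missed (triple-quoted strings are deliberately left alone).
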
@@ -421,7 +421,7 @@ def LeaveCluster(modify_ssh_setup):
result.cmd, result.exit_code, result.output)
# Raise a custom exception (handled in ganeti-noded)
raise errors.QuitGanetiException(True, 'Shutdown scheduled')
raise errors.QuitGanetiException(True, "Shutdown scheduled")
def GetNodeInfo(vgname, hypervisor_type):
@@ -449,8 +449,8 @@ def GetNodeInfo(vgname, hypervisor_type):
if vginfo:
vg_free = int(round(vginfo[0][0], 0))
vg_size = int(round(vginfo[0][1], 0))
outputarray['vg_size'] = vg_size
outputarray['vg_free'] = vg_free
outputarray["vg_size"] = vg_size
outputarray["vg_free"] = vg_free
if hypervisor_type is not None:
hyper = hypervisor.GetHypervisor(hypervisor_type)
@@ -707,7 +707,7 @@ def GetVolumeList(vg_names):
"""
lvs = {}
sep = '|'
sep = "|"
if not vg_names:
vg_names = []
result = utils.RunCmd(["lvs", "--noheadings", "--units=m", "--nosuffix",
@@ -723,9 +723,9 @@ def GetVolumeList(vg_names):
logging.error("Invalid line returned from lvs output: '%s'", line)
continue
vg_name, name, size, attr = match.groups()
inactive = attr[4] == '-'
online = attr[5] == 'o'
virtual = attr[0] == 'v'
inactive = attr[4] == "-"
online = attr[5] == "o"
virtual = attr[0] == "v"
if virtual:
# we don't want to report such volumes as existing, since they
# don't really hold data
@@ -773,20 +773,20 @@ def NodeVolumes():
result.output)
def parse_dev(dev):
return dev.split('(')[0]
return dev.split("(")[0]
def handle_dev(dev):
return [parse_dev(x) for x in dev.split(",")]
def map_line(line):
line = [v.strip() for v in line]
return [{'name': line[0], 'size': line[1],
'dev': dev, 'vg': line[3]} for dev in handle_dev(line[2])]
return [{"name": line[0], "size": line[1],
"dev": dev, "vg": line[3]} for dev in handle_dev(line[2])]
all_devs = []
for line in result.stdout.splitlines():
if line.count('|') >= 3:
all_devs.extend(map_line(line.split('|')))
if line.count("|") >= 3:
all_devs.extend(map_line(line.split("|")))
else:
logging.warning("Strange line in the output from lvs: '%s'", line)
return all_devs
@@ -851,9 +851,9 @@ def GetInstanceInfo(instance, hname):
iinfo = hypervisor.GetHypervisor(hname).GetInstanceInfo(instance)
if iinfo is not None:
output['memory'] = iinfo[2]
output['state'] = iinfo[4]
output['time'] = iinfo[5]
output["memory"] = iinfo[2]
output["state"] = iinfo[4]
output["time"] = iinfo[5]
return output
@@ -907,16 +907,16 @@ def GetAllInstancesInfo(hypervisor_list):
if iinfo:
for name, _, memory, vcpus, state, times in iinfo:
value = {
'memory': memory,
'vcpus': vcpus,
'state': state,
'time': times,
"memory": memory,
"vcpus": vcpus,
"state": state,
"time": times,
}
if name in output:
# we only check static parameters, like memory and vcpus,
# and not state and time which can change between the
# invocations of the different hypervisors
for key in 'memory', 'vcpus':
for key in "memory", "vcpus":
if value[key] != output[name][key]:
_Fail("Instance %s is running twice"
" with different parameters", name)
@@ -961,7 +961,7 @@ def InstanceOsAdd(instance, reinstall, debug):
create_env = OSEnvironment(instance, inst_os, debug)
if reinstall:
create_env['INSTANCE_REINSTALL'] = "1"
create_env["INSTANCE_REINSTALL"] = "1"
logfile = _InstanceLogName("add", instance.os, instance.name)
@@ -993,7 +993,7 @@ def RunRenameInstance(instance, old_name, debug):
inst_os = OSFromDisk(instance.os)
rename_env = OSEnvironment(instance, inst_os, debug)
rename_env['OLD_INSTANCE_NAME'] = old_name
rename_env["OLD_INSTANCE_NAME"] = old_name
logfile = _InstanceLogName("rename", instance.os,
"%s-%s" % (old_name, instance.name))
@@ -1331,7 +1331,7 @@ def BlockdevCreate(disk, size, owner, on_primary, info):
it's not required to return anything.
"""
# TODO: remove the obsolete 'size' argument
# TODO: remove the obsolete "size" argument
# pylint: disable-msg=W0613
clist = []
if disk.children:
@@ -1831,7 +1831,7 @@ def BlockdevExport(disk, dest_node, dest_path, cluster_name):
destcmd)
# all commands have been checked, so we're safe to combine them
command = '|'.join([expcmd, utils.ShellQuoteArgs(remotecmd)])
command = "|".join([expcmd, utils.ShellQuoteArgs(remotecmd)])
result = utils.RunCmd(["bash", "-c", command])
@@ -1925,7 +1925,7 @@ def _ErrnoOrStr(err):
@param err: the exception to format
"""
if hasattr(err, 'errno'):
if hasattr(err, "errno"):
detail = errno.errorcode[err.errno]
else:
detail = str(err)
@@ -2055,10 +2055,10 @@ def _TryOSFromDisk(name, base_dir=None):
os_files = dict.fromkeys(constants.OS_SCRIPTS)
if max(api_versions) >= constants.OS_API_V15:
os_files[constants.OS_VARIANTS_FILE] = ''
os_files[constants.OS_VARIANTS_FILE] = ""
if max(api_versions) >= constants.OS_API_V20:
os_files[constants.OS_PARAMETERS_FILE] = ''
os_files[constants.OS_PARAMETERS_FILE] = ""
else:
del os_files[constants.OS_SCRIPT_VERIFY]
@@ -2161,20 +2161,20 @@ def OSCoreEnv(os_name, inst_os, os_params, debug=0):
result = {}
api_version = \
max(constants.OS_API_VERSIONS.intersection(inst_os.api_versions))
result['OS_API_VERSION'] = '%d' % api_version
result['OS_NAME'] = inst_os.name
result['DEBUG_LEVEL'] = '%d' % debug
result["OS_API_VERSION"] = "%d" % api_version
result["OS_NAME"] = inst_os.name
result["DEBUG_LEVEL"] = "%d" % debug
# OS variants
if api_version >= constants.OS_API_V15:
variant = objects.OS.GetVariant(os_name)
if not variant:
variant = inst_os.supported_variants[0]
result['OS_VARIANT'] = variant
result["OS_VARIANT"] = variant
# OS params
for pname, pvalue in os_params.items():
result['OSP_%s' % pname.upper()] = pvalue
result["OSP_%s" % pname.upper()] = pvalue
return result
@@ -2199,38 +2199,38 @@ def OSEnvironment(instance, inst_os, debug=0):
for attr in ["name", "os", "uuid", "ctime", "mtime", "primary_node"]:
result["INSTANCE_%s" % attr.upper()] = str(getattr(instance, attr))
result['HYPERVISOR'] = instance.hypervisor
result['DISK_COUNT'] = '%d' % len(instance.disks)
result['NIC_COUNT'] = '%d' % len(instance.nics)
result['INSTANCE_SECONDARY_NODES'] = \
('%s' % " ".join(instance.secondary_nodes))
result["HYPERVISOR"] = instance.hypervisor
result["DISK_COUNT"] = "%d" % len(instance.disks)
result["NIC_COUNT"] = "%d" % len(instance.nics)
result["INSTANCE_SECONDARY_NODES"] = \
("%s" % " ".join(instance.secondary_nodes))
# Disks
for idx, disk in enumerate(instance.disks):
real_disk = _OpenRealBD(disk)
result['DISK_%d_PATH' % idx] = real_disk.dev_path
result['DISK_%d_ACCESS' % idx] = disk.mode
result["DISK_%d_PATH" % idx] = real_disk.dev_path
result["DISK_%d_ACCESS" % idx] = disk.mode
if constants.HV_DISK_TYPE in instance.hvparams:
result['DISK_%d_FRONTEND_TYPE' % idx] = \
result["DISK_%d_FRONTEND_TYPE" % idx] = \
instance.hvparams[constants.HV_DISK_TYPE]
if disk.dev_type in constants.LDS_BLOCK:
result['DISK_%d_BACKEND_TYPE' % idx] = 'block'
result["DISK_%d_BACKEND_TYPE" % idx] = "block"
elif disk.dev_type == constants.LD_FILE:
result['DISK_%d_BACKEND_TYPE' % idx] = \
'file:%s' % disk.physical_id[0]
result["DISK_%d_BACKEND_TYPE" % idx] = \
"file:%s" % disk.physical_id[0]
# NICs
for idx, nic in enumerate(instance.nics):
result['NIC_%d_MAC' % idx] = nic.mac
result["NIC_%d_MAC" % idx] = nic.mac
if nic.ip:
result['NIC_%d_IP' % idx] = nic.ip
result['NIC_%d_MODE' % idx] = nic.nicparams[constants.NIC_MODE]
result["NIC_%d_IP" % idx] = nic.ip
result["NIC_%d_MODE" % idx] = nic.nicparams[constants.NIC_MODE]
if nic.nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
result['NIC_%d_BRIDGE' % idx] = nic.nicparams[constants.NIC_LINK]
result["NIC_%d_BRIDGE" % idx] = nic.nicparams[constants.NIC_LINK]
if nic.nicparams[constants.NIC_LINK]:
result['NIC_%d_LINK' % idx] = nic.nicparams[constants.NIC_LINK]
result["NIC_%d_LINK" % idx] = nic.nicparams[constants.NIC_LINK]
if constants.HV_NIC_TYPE in instance.hvparams:
result['NIC_%d_FRONTEND_TYPE' % idx] = \
result["NIC_%d_FRONTEND_TYPE" % idx] = \
instance.hvparams[constants.HV_NIC_TYPE]
# HV/BE params
@@ -2318,46 +2318,46 @@ def FinalizeExport(instance, snap_disks):
config = objects.SerializableConfigParser()
config.add_section(constants.INISECT_EXP)
config.set(constants.INISECT_EXP, 'version', '0')
config.set(constants.INISECT_EXP, 'timestamp', '%d' % int(time.time()))
config.set(constants.INISECT_EXP, 'source', instance.primary_node)
config.set(constants.INISECT_EXP, 'os', instance.os)
config.set(constants.INISECT_EXP, "version", "0")
config.set(constants.INISECT_EXP, "timestamp", "%d" % int(time.time()))
config.set(constants.INISECT_EXP, "source", instance.primary_node)
config.set(constants.INISECT_EXP, "os", instance.os)
config.set(constants.INISECT_EXP, "compression", "none")
config.add_section(constants.INISECT_INS)
config.set(constants.INISECT_INS, 'name', instance.name)
config.set(constants.INISECT_INS, 'memory', '%d' %
config.set(constants.INISECT_INS, "name", instance.name)
config.set(constants.INISECT_INS, "memory", "%d" %
instance.beparams[constants.BE_MEMORY])
config.set(constants.INISECT_INS, 'vcpus', '%d' %
config.set(constants.INISECT_INS, "vcpus", "%d" %
instance.beparams[constants.BE_VCPUS])
config.set(constants.INISECT_INS, 'disk_template', instance.disk_template)
config.set(constants.INISECT_INS, 'hypervisor', instance.hypervisor)
config.set(constants.INISECT_INS, "disk_template", instance.disk_template)
config.set(constants.INISECT_INS, "hypervisor", instance.hypervisor)
config.set(constants.INISECT_INS, "tags", " ".join(instance.GetTags()))
nic_total = 0
for nic_count, nic in enumerate(instance.nics):
nic_total += 1
config.set(constants.INISECT_INS, 'nic%d_mac' %
nic_count, '%s' % nic.mac)
config.set(constants.INISECT_INS, 'nic%d_ip' % nic_count, '%s' % nic.ip)
config.set(constants.INISECT_INS, "nic%d_mac" %
nic_count, "%s" % nic.mac)
config.set(constants.INISECT_INS, "nic%d_ip" % nic_count, "%s" % nic.ip)
for param in constants.NICS_PARAMETER_TYPES:
config.set(constants.INISECT_INS, 'nic%d_%s' % (nic_count, param),
'%s' % nic.nicparams.get(param, None))
config.set(constants.INISECT_INS, "nic%d_%s" % (nic_count, param),
"%s" % nic.nicparams.get(param, None))
# TODO: redundant: on load can read nics until it doesn't exist
config.set(constants.INISECT_INS, 'nic_count' , '%d' % nic_total)
config.set(constants.INISECT_INS, "nic_count" , "%d" % nic_total)
disk_total = 0
for disk_count, disk in enumerate(snap_disks):
if disk:
disk_total += 1
config.set(constants.INISECT_INS, 'disk%d_ivname' % disk_count,
('%s' % disk.iv_name))
config.set(constants.INISECT_INS, 'disk%d_dump' % disk_count,
('%s' % disk.physical_id[1]))
config.set(constants.INISECT_INS, 'disk%d_size' % disk_count,
('%d' % disk.size))
config.set(constants.INISECT_INS, 'disk_count' , '%d' % disk_total)
config.set(constants.INISECT_INS, "disk%d_ivname" % disk_count,
("%s" % disk.iv_name))
config.set(constants.INISECT_INS, "disk%d_dump" % disk_count,
("%s" % disk.physical_id[1]))
config.set(constants.INISECT_INS, "disk%d_size" % disk_count,
("%d" % disk.size))
config.set(constants.INISECT_INS, "disk_count" , "%d" % disk_total)
# New-style hypervisor/backend parameters
@@ -600,7 +600,7 @@ class LogicalVolume(BlockDev):
# one line for any non-empty string
logging.error("Can't parse LVS output, no lines? Got '%s'", str(out))
return False
out = out[-1].strip().rstrip(',')
out = out[-1].strip().rstrip(",")
out = out.split(",")
if len(out) != 5:
logging.error("Can't parse LVS output, len(%s) != 5", str(out))
@@ -633,7 +633,7 @@ class LogicalVolume(BlockDev):
self.minor = minor
self.pe_size = pe_size
self.stripe_count = stripes
self._degraded = status[0] == 'v' # virtual volume, i.e. doesn't backing
self._degraded = status[0] == "v" # virtual volume, i.e. doesn't backing
# storage
self.attached = True
return True
@@ -745,8 +745,8 @@ class LogicalVolume(BlockDev):
BlockDev.SetInfo(self, text)
# Replace invalid characters
text = re.sub('^[^A-Za-z0-9_+.]', '_', text)
text = re.sub('[^-A-Za-z0-9_+.]', '_', text)
text = re.sub("^[^A-Za-z0-9_+.]", "_", text)
text = re.sub("[^-A-Za-z0-9_+.]", "_", text)
# Only up to 128 characters are allowed
text = text[:128]
@@ -971,14 +971,14 @@ class BaseDRBD(BlockDev): # pylint: disable-msg=W0223
first_line)
values = version.groups()
retval = {'k_major': int(values[0]),
'k_minor': int(values[1]),
'k_point': int(values[2]),
'api': int(values[3]),
'proto': int(values[4]),
retval = {"k_major": int(values[0]),
"k_minor": int(values[1]),
"k_point": int(values[2]),
"api": int(values[3]),
"proto": int(values[4]),
}
if values[5] is not None:
retval['proto2'] = values[5]
retval["proto2"] = values[5]
return retval
@@ -1113,10 +1113,10 @@ class DRBD8(BaseDRBD):
super(DRBD8, self).__init__(unique_id, children, size)
self.major = self._DRBD_MAJOR
version = self._GetVersion(self._GetProcData())
if version['k_major'] != 8 :
if version["k_major"] != 8 :
_ThrowError("Mismatch in DRBD kernel version and requested ganeti"
" usage: kernel is %s.%s, ganeti wants 8.x",
version['k_major'], version['k_minor'])
version["k_major"], version["k_minor"])
if (self._lhost is not None and self._lhost == self._rhost and
self._lport == self._rport):
@@ -1210,7 +1210,7 @@ class DRBD8(BaseDRBD):
pyp.Optional(pyp.restOfLine).suppress())
# an entire section
section_name = pyp.Word(pyp.alphas + '_')
section_name = pyp.Word(pyp.alphas + "_")
section = section_name + lbrace + pyp.ZeroOrMore(pyp.Group(stmt)) + rbrace
bnf = pyp.ZeroOrMore(pyp.Group(section ^ stmt))
@@ -1343,18 +1343,18 @@ class DRBD8(BaseDRBD):
# what we aim here is to revert back to the 'drain' method of
# disk flushes and to disable metadata barriers, in effect going
# back to pre-8.0.7 behaviour
vmaj = version['k_major']
vmin = version['k_minor']
vrel = version['k_point']
vmaj = version["k_major"]
vmin = version["k_minor"]
vrel = version["k_point"]
assert vmaj == 8
if vmin == 0: # 8.0.x
if vrel >= 12:
args.extend(['-i', '-m'])
args.extend(["-i", "-m"])
elif vmin == 2: # 8.2.x
if vrel >= 7:
args.extend(['-i', '-m'])
args.extend(["-i", "-m"])
elif vmaj >= 3: # 8.3.x or newer
args.extend(['-i', '-a', 'm'])
args.extend(["-i", "-a", "m"])
result = utils.RunCmd(args)
if result.failed:
_ThrowError("drbd%d: can't attach local disk: %s", minor, result.output)
@@ -2102,7 +2102,7 @@ class PersistentBlockDevice(BlockDev):
if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
raise ValueError("Invalid configuration data %s" % str(unique_id))
self.dev_path = unique_id[1]
if not os.path.realpath(self.dev_path).startswith('/dev/'):
if not os.path.realpath(self.dev_path).startswith("/dev/"):
raise ValueError("Full path '%s' lies outside /dev" %
os.path.realpath(self.dev_path))
# TODO: this is just a safety guard checking that we only deal with devices
@@ -517,7 +517,7 @@ def check_ident_key_val(option, opt, value): # pylint: disable-msg=W0613
"""
if ":" not in value:
ident, rest = value, ''
ident, rest = value, ""
else:
ident, rest = value.split(":", 1)
@@ -621,7 +621,7 @@ SEP_OPT = cli_option("--separator", default=None,
" (defaults to one space)"))
USEUNITS_OPT = cli_option("--units", default=None,
dest="units", choices=('h', 'm', 'g', 't'),
dest="units", choices=("h", "m", "g", "t"),
help="Specify units for output (one of h/m/g/t)")
FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
@@ -1417,8 +1417,8 @@ def SplitNodeOption(value):
"""Splits the value of a --node option.
"""
if value and ':' in value:
return value.split(':', 1)
if value and ":" in value:
return value.split(":", 1)
else:
return (value, None)
@@ -1435,7 +1435,7 @@ def CalculateOSNames(os_name, os_variants):
"""
if os_variants:
return ['%s+%s' % (os_name, v) for v in os_variants]
return ["%s+%s" % (os_name, v) for v in os_variants]
else:
return [os_name]
@@ -1477,12 +1477,12 @@ def AskUser(text, choices=None):
"""
if choices is None:
choices = [('y', True, 'Perform the operation'),
('n', False, 'Do not perform the operation')]
choices = [("y", True, "Perform the operation"),
("n", False, "Do not perform the operation")]
if not choices or not isinstance(choices, list):
raise errors.ProgrammerError("Invalid choices argument to AskUser")
for entry in choices:
if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
raise errors.ProgrammerError("Invalid choices element to AskUser")
answer = choices[-1][1]
@@ -1497,18 +1497,18 @@ def AskUser(text, choices=None):
try:
chars = [entry[0] for entry in choices]
chars[-1] = "[%s]" % chars[-1]
chars.append('?')
chars.append("?")
maps = dict([(entry[0], entry[1]) for entry in choices])
while True:
f.write(text)
f.write('\n')
f.write("\n")
f.write("/".join(chars))
f.write(": ")
line = f.readline(2).strip().lower()
if line in maps:
answer = maps[line]
break
elif line == '?':
elif line == "?":
for entry in choices:
f.write(" %s - %s\n" % (entry[0], entry[2]))
f.write("\n")
@@ -1965,7 +1965,7 @@ def FormatError(err):
retcode = 0
else:
obuf.write("Unhandled exception: %s" % msg)
return retcode, obuf.getvalue().rstrip('\n')
return retcode, obuf.getvalue().rstrip("\n")
def GenericMain(commands, override=None, aliases=None):
@@ -2381,7 +2381,7 @@ def GenerateTable(headers, fields, separator, data,
if separator is None:
mlens = [0 for name in fields]
format_str = ' '.join(format_fields)
format_str = " ".join(format_fields)
else:
format_str = separator.replace("%", "%%").join(format_fields)
@@ -2420,7 +2420,7 @@ def GenerateTable(headers, fields, separator, data,
for line in data:
args = []
if line is None:
line = ['-' for _ in fields]
line = ["-" for _ in fields]
for idx in range(len(fields)):
if separator is None:
args.append(mlens[idx])
@@ -2826,7 +2826,7 @@ def FormatTimestamp(ts):
"""
if not isinstance (ts, (tuple, list)) or len(ts) != 2:
return '?'
return "?"
sec, usec = ts
return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
@@ -2849,11 +2849,11 @@ def ParseTimespec(value):
if not value:
raise errors.OpPrereqError("Empty time specification passed")
suffix_map = {
's': 1,
'm': 60,
'h': 3600,
'd': 86400,
'w': 604800,
"s": 1,
"m": 60,
"h": 3600,
"d": 86400,
"w": 604800,
}
if value[-1] not in suffix_map:
try:
@@ -2966,7 +2966,7 @@ def _ToStream(stream, txt, *args):
stream.write(txt % args)
else:
stream.write(txt)
stream.write('\n')
stream.write("\n")
stream.flush()
except IOError, err:
if err.errno == errno.EPIPE:
@@ -1242,7 +1242,7 @@ def Epo(opts, args):
commands = {
'init': (
"init": (
InitCluster, [ArgHost(min=1, max=1)],
[BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, GLOBAL_FILEDIR_OPT,
HVLIST_OPT, MAC_PREFIX_OPT, MASTER_NETDEV_OPT, NIC_PARAMS_OPT,
@@ -1252,75 +1252,75 @@ commands = {
DEFAULT_IALLOCATOR_OPT, PRIMARY_IP_VERSION_OPT, PREALLOC_WIPE_DISKS_OPT,
NODE_PARAMS_OPT, GLOBAL_SHARED_FILEDIR_OPT],
"[opts...] <cluster_name>", "Initialises a new cluster configuration"),
'destroy': (
"destroy": (
DestroyCluster, ARGS_NONE, [YES_DOIT_OPT],
"", "Destroy cluster"),
'rename': (
"rename": (
RenameCluster, [ArgHost(min=1, max=1)],
[FORCE_OPT, DRY_RUN_OPT],
"<new_name>",
"Renames the cluster"),
'redist-conf': (
"redist-conf": (
RedistributeConfig, ARGS_NONE, [SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
"", "Forces a push of the configuration file and ssconf files"
" to the nodes in the cluster"),
'verify': (
"verify": (
VerifyCluster, ARGS_NONE,
[VERBOSE_OPT, DEBUG_SIMERR_OPT, ERROR_CODES_OPT, NONPLUS1_OPT,
DRY_RUN_OPT, PRIORITY_OPT, NODEGROUP_OPT],
"", "Does a check on the cluster configuration"),
'verify-disks': (
"verify-disks": (
VerifyDisks, ARGS_NONE, [PRIORITY_OPT],
"", "Does a check on the cluster disk status"),
'repair-disk-sizes': (
"repair-disk-sizes": (
RepairDiskSizes, ARGS_MANY_INSTANCES, [DRY_RUN_OPT, PRIORITY_OPT],
"", "Updates mismatches in recorded disk sizes"),
'master-failover': (
"master-failover": (
MasterFailover, ARGS_NONE, [NOVOTING_OPT],
"", "Makes the current node the master"),
'master-ping': (
"master-ping": (
MasterPing, ARGS_NONE, [],
"", "Checks if the master is alive"),
'version': (
"version": (
ShowClusterVersion, ARGS_NONE, [],
"", "Shows the cluster version"),
'getmaster': (
"getmaster": (
ShowClusterMaster, ARGS_NONE, [],
"", "Shows the cluster master"),
'copyfile': (
"copyfile": (
ClusterCopyFile, [ArgFile(min=1, max=1)],
[NODE_LIST_OPT, USE_REPL_NET_OPT, NODEGROUP_OPT],
"[-n node...] <filename>", "Copies a file to all (or only some) nodes"),
'command': (
"command": (
RunClusterCommand, [ArgCommand(min=1)],
[NODE_LIST_OPT, NODEGROUP_OPT],
"[-n node...] <command>", "Runs a command on all (or only some) nodes"),
'info': (
"info": (
ShowClusterConfig, ARGS_NONE, [ROMAN_OPT],
"[--roman]", "Show cluster configuration"),
'list-tags': (
"list-tags": (
ListTags, ARGS_NONE, [], "", "List the tags of the cluster"),
'add-tags': (
"add-tags": (
AddTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT],
"tag...", "Add tags to the cluster"),
'remove-tags': (
"remove-tags": (
RemoveTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT],
"tag...", "Remove tags from the cluster"),
'search-tags': (
"search-tags": (
SearchTags, [ArgUnknown(min=1, max=1)], [PRIORITY_OPT], "",
"Searches the tags on all objects on"
" the cluster for a given pattern (regex)"),
'queue': (
"queue": (
QueueOps,
[ArgChoice(min=1, max=1, choices=["drain", "undrain", "info"])],
[], "drain|undrain|info", "Change queue properties"),
'watcher': (
"watcher": (
WatcherOps,
[ArgChoice(min=1, max=1, choices=["pause", "continue", "info"]),
ArgSuggest(min=0, max=1, choices=["30m", "1h", "4h"])],
[],
"{pause <timespec>|continue|info}", "Change watcher properties"),
'modify': (
"modify": (
SetClusterParams, ARGS_NONE,
[BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, HVLIST_OPT, MASTER_NETDEV_OPT,
NIC_PARAMS_OPT, NOLVM_STORAGE_OPT, VG_NAME_OPT, MAINTAIN_NODE_HEALTH_OPT,
@@ -1347,7 +1347,7 @@ commands = {
#: dictionary with aliases for commands
aliases = {
'masterfailover': 'master-failover',
"masterfailover": "master-failover",
}
@@ -143,7 +143,7 @@ def TestAllocator(opts, args):
while len(row) < 3:
row.append(None)
for i in range(3):
if row[i] == '':
if row[i] == "":
row[i] = None
nic_dict = [{
constants.INIC_MAC: v[0],
@@ -616,7 +616,7 @@ def ListLocks(opts, args): # pylint: disable-msg=W0613
commands = {
'delay': (
"delay": (
Delay, [ArgUnknown(min=1, max=1)],
[cli_option("--no-master", dest="on_master", default=True,
action="store_false", help="Do not sleep in the master code"),
@@ -627,7 +627,7 @@ commands = {
DRY_RUN_OPT, PRIORITY_OPT,
],
"[opts...] <duration>", "Executes a TestDelay OpCode"),
'submit-job': (
"submit-job": (
GenericOpCodes, [ArgFile(min=1)],
[VERBOSE_OPT,
cli_option("--op-repeat", type="int", default="1", dest="rep_op",
@@ -642,7 +642,7 @@ commands = {
],
"<op_list_file...>", "Submits jobs built from json files"
" containing a list of serialized opcodes"),
'iallocator': (
"iallocator": (
TestAllocator, [ArgUnknown(min=1)],
[cli_option("--dir", dest="direction", default=constants.IALLOCATOR_DIR_IN,
choices=list(constants.VALID_IALLOCATOR_DIRECTIONS),
@@ -287,7 +287,7 @@ def BatchCreate(opts, args):
"hvparams": {},
"file_storage_dir": None,
"force_variant": False,
"file_driver": 'loop'}
"file_driver": "loop"}
def _PopulateWithDefaults(spec):
"""Returns a new hash combined with default values."""
@@ -298,25 +298,25 @@ def BatchCreate(opts, args):
def _Validate(spec):
"""Validate the instance specs."""
# Validate fields required under any circumstances
for required_field in ('os', 'template'):
for required_field in ("os", "template"):
if required_field not in spec:
raise errors.OpPrereqError('Required field "%s" is missing.' %
required_field, errors.ECODE_INVAL)
# Validate special fields
if spec['primary_node'] is not None:
if (spec['template'] in constants.DTS_INT_MIRROR and
spec['secondary_node'] is None):
raise errors.OpPrereqError('Template requires secondary node, but'
' there was no secondary provided.',
if spec["primary_node"] is not None:
if (spec["template"] in constants.DTS_INT_MIRROR and
spec["secondary_node"] is None):