Commit 3ccb3a64 authored by Michael Hanselmann's avatar Michael Hanselmann
Browse files

Replace single- with double-quotes



In at least two cases "%s" is replaced with str(), too.
Signed-off-by: Michael Hanselmann <hansmi@google.com>
Reviewed-by: Iustin Pop <iustin@google.com>
parent 0f85ebd9
......@@ -1218,10 +1218,10 @@ class DRBD8(BaseDRBD):
defa = pyp.Literal("_is_default").suppress()
dbl_quote = pyp.Literal('"').suppress()
keyword = pyp.Word(pyp.alphanums + '-')
keyword = pyp.Word(pyp.alphanums + "-")
# value types
value = pyp.Word(pyp.alphanums + '_-/.:')
value = pyp.Word(pyp.alphanums + "_-/.:")
quoted = dbl_quote + pyp.CharsNotIn('"') + dbl_quote
ipv4_addr = (pyp.Optional(pyp.Literal("ipv4")).suppress() +
pyp.Word(pyp.nums + ".") + colon + number)
......
......@@ -666,7 +666,7 @@ def SetupNodeDaemon(cluster_name, node, ssh_key_check):
(constants.DAEMON_UTIL, constants.DAEMON_UTIL, constants.NODED,
utils.ShellQuote(bind_address)))
result = sshrunner.Run(node, 'root', mycommand, batch=False,
result = sshrunner.Run(node, "root", mycommand, batch=False,
ask_key=ssh_key_check,
use_cluster_key=True,
strict_host_check=ssh_key_check)
......@@ -708,7 +708,7 @@ def MasterFailover(no_voting=False):
" as master candidates. Only these nodes"
" can become masters. Current list of"
" master candidates is:\n"
"%s" % ('\n'.join(mc_no_master)),
"%s" % ("\n".join(mc_no_master)),
errors.ECODE_STATE)
if not no_voting:
......
......@@ -120,22 +120,22 @@ import_opts = [
commands = {
'list': (
"list": (
PrintExportList, ARGS_NONE,
[NODE_LIST_OPT],
"", "Lists instance exports available in the ganeti cluster"),
'export': (
"export": (
ExportInstance, ARGS_ONE_INSTANCE,
[FORCE_OPT, SINGLE_NODE_OPT, NOSHUTDOWN_OPT, SHUTDOWN_TIMEOUT_OPT,
REMOVE_INSTANCE_OPT, IGNORE_REMOVE_FAILURES_OPT, DRY_RUN_OPT,
PRIORITY_OPT],
"-n <target_node> [opts...] <name>",
"Exports an instance to an image"),
'import': (
"import": (
ImportInstance, ARGS_ONE_INSTANCE, COMMON_CREATE_OPTS + import_opts,
"[...] -t disk-type -n node[:secondary-node] <name>",
"Imports an instance from an exported image"),
'remove': (
"remove": (
RemoveExport, [ArgUnknown(min=1, max=1)], [DRY_RUN_OPT, PRIORITY_OPT],
"<name>", "Remove exports of named instance from the filesystem."),
}
......
......@@ -34,7 +34,7 @@ def PackMagic(payload):
"""Prepend the confd magic fourcc to a payload.
"""
return ''.join([constants.CONFD_MAGIC_FOURCC, payload])
return "".join([constants.CONFD_MAGIC_FOURCC, payload])
def UnpackMagic(payload):
......
......@@ -171,7 +171,7 @@ class ConfdClient:
"""
if now is None:
now = time.time()
tstamp = '%d' % now
tstamp = "%d" % now
req = serializer.DumpSignedJson(request.ToDict(), self._hmac_key, tstamp)
return confd.PackMagic(req)
......
......@@ -63,7 +63,7 @@ class ConfdQuery(object):
"""
status = constants.CONFD_REPL_STATUS_NOTIMPLEMENTED
answer = 'not implemented'
answer = "not implemented"
return status, answer
......@@ -80,10 +80,10 @@ class PingQuery(ConfdQuery):
"""
if query is None:
status = constants.CONFD_REPL_STATUS_OK
answer = 'ok'
answer = "ok"
else:
status = constants.CONFD_REPL_STATUS_ERROR
answer = 'non-empty ping query'
answer = "non-empty ping query"
return status, answer
......
......@@ -92,7 +92,7 @@ class ConfdProcessor(object):
"""
if self.disabled:
logging.debug('Confd is disabled. Ignoring query.')
logging.debug("Confd is disabled. Ignoring query.")
return
try:
request = self.ExtractRequest(payload_in)
......@@ -100,7 +100,7 @@ class ConfdProcessor(object):
payload_out = self.PackReply(reply, rsalt)
return payload_out
except errors.ConfdRequestError, err:
logging.info('Ignoring broken query from %s:%d: %s', ip, port, err)
logging.info("Ignoring broken query from %s:%d: %s", ip, port, err)
return None
def ExtractRequest(self, payload):
......@@ -130,7 +130,7 @@ class ConfdProcessor(object):
try:
request = objects.ConfdRequest.FromDict(message)
except AttributeError, err:
raise errors.ConfdRequestError('%s' % err)
raise errors.ConfdRequestError(str(err))
return request
......
......@@ -1839,8 +1839,8 @@ class ConfigWriter:
# Make sure the configuration has the right version
_ValidateConfig(data)
if (not hasattr(data, 'cluster') or
not hasattr(data.cluster, 'rsahostkeypub')):
if (not hasattr(data, "cluster") or
not hasattr(data.cluster, "rsahostkeypub")):
raise errors.ConfigurationError("Incomplete configuration"
" (missing cluster.rsahostkeypub)")
......
......@@ -382,7 +382,7 @@ class AsyncUDPSocket(GanetiBaseAsyncoreDispatcher):
"""
if len(payload) > constants.MAX_UDP_DATA_SIZE:
raise errors.UdpDataSizeError('Packet too big: %s > %s' % (len(payload),
raise errors.UdpDataSizeError("Packet too big: %s > %s" % (len(payload),
constants.MAX_UDP_DATA_SIZE))
self._out_queue.append((ip, port, payload))
......
......@@ -1465,7 +1465,7 @@ LEVEL_NAMES = {
}
# Constant for the big ganeti lock
BGL = 'BGL'
BGL = "BGL"
class GanetiLockManager:
......
......@@ -104,16 +104,16 @@ VS_TYPE = {
# AllocationUnits values and conversion
ALLOCATION_UNITS = {
'b': ["bytes", "b"],
'kb': ["kilobytes", "kb", "byte * 2^10", "kibibytes", "kib"],
'mb': ["megabytes", "mb", "byte * 2^20", "mebibytes", "mib"],
'gb': ["gigabytes", "gb", "byte * 2^30", "gibibytes", "gib"],
"b": ["bytes", "b"],
"kb": ["kilobytes", "kb", "byte * 2^10", "kibibytes", "kib"],
"mb": ["megabytes", "mb", "byte * 2^20", "mebibytes", "mib"],
"gb": ["gigabytes", "gb", "byte * 2^30", "gibibytes", "gib"],
}
CONVERT_UNITS_TO_MB = {
'b': lambda x: x / (1024 * 1024),
'kb': lambda x: x / 1024,
'mb': lambda x: x,
'gb': lambda x: x * 1024,
"b": lambda x: x / (1024 * 1024),
"kb": lambda x: x / 1024,
"mb": lambda x: x,
"gb": lambda x: x * 1024,
}
# Names of the config fields
......
......@@ -657,9 +657,9 @@ class RpcRunner(_RpcClientBase,
if osp is not None:
idict["osparams"].update(osp)
for nic in idict["nics"]:
nic['nicparams'] = objects.FillDict(
nic["nicparams"] = objects.FillDict(
cluster.nicparams[constants.PP_DEFAULT],
nic['nicparams'])
nic["nicparams"])
return idict
def _InstDictHvpBep(self, (instance, hvp, bep)):
......
......@@ -287,7 +287,7 @@ def GetHomeDir(user, default=None):
The user can be passed either as a string (denoting the name) or as
an integer (denoting the user id). If the user is not found, the
'default' argument is returned, which defaults to None.
C{default} argument is returned, which defaults to C{None}.
"""
try:
......
......@@ -116,7 +116,7 @@ class WatcherState(object):
fd = utils.WriteFile(filename,
data=serialized_form,
prewrite=utils.LockFile, close=False)
self.statefile = os.fdopen(fd, 'w+')
self.statefile = os.fdopen(fd, "w+")
def Close(self):
"""Unlock configuration file and close it.
......
......@@ -97,7 +97,7 @@ def main():
# Option parsing
parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
parser.add_option('--dry-run', dest='dry_run',
parser.add_option("--dry-run", dest="dry_run",
action="store_true",
help="Try to do the conversion, but don't write"
" output file")
......@@ -107,7 +107,7 @@ def main():
parser.add_option("--ignore-hostname", dest="ignore_hostname",
action="store_true", default=False,
help="Don't abort if hostname doesn't match")
parser.add_option('--path', help="Convert configuration in this"
parser.add_option("--path", help="Convert configuration in this"
" directory instead of '%s'" % constants.DATA_DIR,
default=constants.DATA_DIR, dest="data_dir")
parser.add_option("--no-verify",
......
......@@ -57,26 +57,26 @@ NoDefault = object()
# Dictionary with instance old keys, and new hypervisor keys
INST_HV_CHG = {
'hvm_pae': constants.HV_PAE,
'vnc_bind_address': constants.HV_VNC_BIND_ADDRESS,
'initrd_path': constants.HV_INITRD_PATH,
'hvm_nic_type': constants.HV_NIC_TYPE,
'kernel_path': constants.HV_KERNEL_PATH,
'hvm_acpi': constants.HV_ACPI,
'hvm_cdrom_image_path': constants.HV_CDROM_IMAGE_PATH,
'hvm_boot_order': constants.HV_BOOT_ORDER,
'hvm_disk_type': constants.HV_DISK_TYPE,
"hvm_pae": constants.HV_PAE,
"vnc_bind_address": constants.HV_VNC_BIND_ADDRESS,
"initrd_path": constants.HV_INITRD_PATH,
"hvm_nic_type": constants.HV_NIC_TYPE,
"kernel_path": constants.HV_KERNEL_PATH,
"hvm_acpi": constants.HV_ACPI,
"hvm_cdrom_image_path": constants.HV_CDROM_IMAGE_PATH,
"hvm_boot_order": constants.HV_BOOT_ORDER,
"hvm_disk_type": constants.HV_DISK_TYPE,
}
# Instance beparams changes
INST_BE_CHG = {
'vcpus': constants.BE_VCPUS,
'memory': constants.BE_MEMORY,
'auto_balance': constants.BE_AUTO_BALANCE,
"vcpus": constants.BE_VCPUS,
"memory": constants.BE_MEMORY,
"auto_balance": constants.BE_AUTO_BALANCE,
}
# Field names
F_SERIAL = 'serial_no'
F_SERIAL = "serial_no"
class Error(Exception):
......@@ -97,7 +97,7 @@ def ReadFile(file_name, default=NoDefault):
"""
logging.debug("Reading %s", file_name)
try:
fh = open(file_name, 'r')
fh = open(file_name, "r")
except IOError, err:
if default is not NoDefault and err.errno == errno.ENOENT:
return default
......@@ -161,17 +161,17 @@ def Cluster12To20(cluster):
"""
logging.info("Upgrading the cluster object")
# Upgrade the configuration version
if 'config_version' in cluster:
del cluster['config_version']
if "config_version" in cluster:
del cluster["config_version"]
# Add old ssconf keys back to config
logging.info(" - importing ssconf keys")
for key in ('master_node', 'master_ip', 'master_netdev', 'cluster_name'):
for key in ("master_node", "master_ip", "master_netdev", "cluster_name"):
if key not in cluster:
cluster[key] = ReadFile(SsconfName(key)).strip()
if 'default_hypervisor' not in cluster:
old_hyp = ReadFile(SsconfName('hypervisor')).strip()
if "default_hypervisor" not in cluster:
old_hyp = ReadFile(SsconfName("hypervisor")).strip()
if old_hyp == "xen-3.0":
hyp = "xen-pvm"
elif old_hyp == "xen-hvm-3.1":
......@@ -182,24 +182,24 @@ def Cluster12To20(cluster):
raise Error("Unknown old hypervisor name '%s'" % old_hyp)
logging.info("Setting the default and enabled hypervisor")
cluster['default_hypervisor'] = hyp
cluster['enabled_hypervisors'] = [hyp]
cluster["default_hypervisor"] = hyp
cluster["enabled_hypervisors"] = [hyp]
# hv/be params
if 'hvparams' not in cluster:
if "hvparams" not in cluster:
logging.info(" - adding hvparams")
cluster['hvparams'] = constants.HVC_DEFAULTS
if 'beparams' not in cluster:
cluster["hvparams"] = constants.HVC_DEFAULTS
if "beparams" not in cluster:
logging.info(" - adding beparams")
cluster['beparams'] = {constants.PP_DEFAULT: constants.BEC_DEFAULTS}
cluster["beparams"] = {constants.PP_DEFAULT: constants.BEC_DEFAULTS}
# file storage
if 'file_storage_dir' not in cluster:
cluster['file_storage_dir'] = constants.DEFAULT_FILE_STORAGE_DIR
if "file_storage_dir" not in cluster:
cluster["file_storage_dir"] = constants.DEFAULT_FILE_STORAGE_DIR
# candidate pool size
if 'candidate_pool_size' not in cluster:
cluster['candidate_pool_size'] = constants.MASTER_POOL_SIZE_DEFAULT
if "candidate_pool_size" not in cluster:
cluster["candidate_pool_size"] = constants.MASTER_POOL_SIZE_DEFAULT
def Node12To20(node):
......@@ -209,9 +209,9 @@ def Node12To20(node):
logging.info("Upgrading node %s", node['name'])
if F_SERIAL not in node:
node[F_SERIAL] = 1
if 'master_candidate' not in node:
node['master_candidate'] = True
for key in 'offline', 'drained':
if "master_candidate" not in node:
node["master_candidate"] = True
for key in "offline", "drained":
if key not in node:
node[key] = False
......@@ -223,12 +223,12 @@ def Instance12To20(drbd_minors, secrets, hypervisor, instance):
if F_SERIAL not in instance:
instance[F_SERIAL] = 1
if 'hypervisor' not in instance:
instance['hypervisor'] = hypervisor
if "hypervisor" not in instance:
instance["hypervisor"] = hypervisor
# hvparams changes
if 'hvparams' not in instance:
instance['hvparams'] = hvp = {}
if "hvparams" not in instance:
instance["hvparams"] = hvp = {}
for old, new in INST_HV_CHG.items():
if old in instance:
if (instance[old] is not None and
......@@ -238,8 +238,8 @@ def Instance12To20(drbd_minors, secrets, hypervisor, instance):
del instance[old]
# beparams changes
if 'beparams' not in instance:
instance['beparams'] = bep = {}
if "beparams" not in instance:
instance["beparams"] = bep = {}
for old, new in INST_BE_CHG.items():
if old in instance:
if instance[old] is not None:
......@@ -247,23 +247,23 @@ def Instance12To20(drbd_minors, secrets, hypervisor, instance):
del instance[old]
# disk changes
for disk in instance['disks']:
for disk in instance["disks"]:
Disk12To20(drbd_minors, secrets, disk)
# other instance changes
if 'status' in instance:
instance['admin_up'] = instance['status'] == 'up'
del instance['status']
if "status" in instance:
instance["admin_up"] = instance["status"] == "up"
del instance["status"]
def Disk12To20(drbd_minors, secrets, disk):
"""Upgrades a disk from 1.2 to 2.0.
"""
if 'mode' not in disk:
disk['mode'] = constants.DISK_RDWR
if disk['dev_type'] == constants.LD_DRBD8:
old_lid = disk['logical_id']
if "mode" not in disk:
disk["mode"] = constants.DISK_RDWR
if disk["dev_type"] == constants.LD_DRBD8:
old_lid = disk["logical_id"]
for node in old_lid[:2]:
if node not in drbd_minors:
raise Error("Can't find node '%s' while upgrading disk" % node)
......@@ -271,9 +271,9 @@ def Disk12To20(drbd_minors, secrets, disk):
minor = drbd_minors[node]
old_lid.append(minor)
old_lid.append(GenerateSecret(secrets))
del disk['physical_id']
if disk['children']:
for child in disk['children']:
del disk["physical_id"]
if disk["children"]:
for child in disk["children"]:
Disk12To20(drbd_minors, secrets, child)
......@@ -288,14 +288,14 @@ def main():
# Option parsing
parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
parser.add_option('--dry-run', dest='dry_run',
parser.add_option("--dry-run", dest="dry_run",
action="store_true",
help="Try to do the conversion, but don't write"
" output file")
parser.add_option(cli.FORCE_OPT)
parser.add_option(cli.DEBUG_OPT)
parser.add_option(cli.VERBOSE_OPT)
parser.add_option('--path', help="Convert configuration in this"
parser.add_option("--path", help="Convert configuration in this"
" directory instead of '%s'" % constants.DATA_DIR,
default=constants.DATA_DIR, dest="data_dir")
(options, args) = parser.parse_args()
......@@ -327,7 +327,7 @@ def main():
raise Error(("%s does not seem to be a known Ganeti configuration"
" directory") % options.data_dir)
config_version = ReadFile(SsconfName('config_version'), "1.2").strip()
config_version = ReadFile(SsconfName("config_version"), "1.2").strip()
logging.info("Found configuration version %s", config_version)
config_data = serializer.LoadJson(ReadFile(options.CONFIG_DATA_PATH))
......@@ -343,8 +343,8 @@ def main():
if old_config_version not in (3, ):
raise Error("Unsupported configuration version: %s" %
old_config_version)
if 'version' not in config_data:
config_data['version'] = constants.BuildVersion(2, 0, 0)
if "version" not in config_data:
config_data["version"] = constants.BuildVersion(2, 0, 0)
if F_SERIAL not in config_data:
config_data[F_SERIAL] = 1
......@@ -361,8 +361,8 @@ def main():
" instances using remote_raid1 disk template")
# Build content of new known_hosts file
cluster_name = ReadFile(SsconfName('cluster_name')).rstrip()
cluster_key = cluster['rsahostkeypub']
cluster_name = ReadFile(SsconfName("cluster_name")).rstrip()
cluster_key = cluster["rsahostkeypub"]
known_hosts = "%s ssh-rsa %s\n" % (cluster_name, cluster_key)
Cluster12To20(cluster)
......@@ -370,17 +370,17 @@ def main():
# Add node attributes
logging.info("Upgrading nodes")
# stable-sort the names to have repeatable runs
for node_name in utils.NiceSort(config_data['nodes'].keys()):
Node12To20(config_data['nodes'][node_name])
for node_name in utils.NiceSort(config_data["nodes"].keys()):
Node12To20(config_data["nodes"][node_name])
# Instance changes
logging.info("Upgrading instances")
drbd_minors = dict.fromkeys(config_data['nodes'], 0)
drbd_minors = dict.fromkeys(config_data["nodes"], 0)
secrets = set()
# stable-sort the names to have repeatable runs
for instance_name in utils.NiceSort(config_data['instances'].keys()):
Instance12To20(drbd_minors, secrets, cluster['default_hypervisor'],
config_data['instances'][instance_name])
for instance_name in utils.NiceSort(config_data["instances"].keys()):
Instance12To20(drbd_minors, secrets, cluster["default_hypervisor"],
config_data["instances"][instance_name])
else:
logging.info("Found a Ganeti 2.0 configuration")
......
......@@ -197,7 +197,7 @@ class Merger(object):
raise errors.RemoteError("Unable to retrieve list of nodes from %s."
" Fail reason: %s; output: %s" %
(cluster, result.fail_reason, result.output))
nodes_statuses = [line.split(',') for line in result.stdout.splitlines()]
nodes_statuses = [line.split(",") for line in result.stdout.splitlines()]
nodes = [node_status[0] for node_status in nodes_statuses
if node_status[1] == "N"]
......
......@@ -208,7 +208,7 @@ def IsPartitioned(disk):
Currently only md devices are used as is.
"""
return not (disk.startswith('md') or PART_RE.match(disk))
return not (disk.startswith("md") or PART_RE.match(disk))
def DeviceName(disk):
......@@ -219,9 +219,9 @@ def DeviceName(disk):
"""
if IsPartitioned(disk):
device = '/dev/%s1' % disk
device = "/dev/%s1" % disk
else:
device = '/dev/%s' % disk
device = "/dev/%s" % disk
return device
......@@ -268,7 +268,7 @@ def CheckPrereq():
raise PrereqError("This tool runs as root only. Really.")
osname, _, release, _, _ = os.uname()
if osname != 'Linux':
if osname != "Linux":
raise PrereqError("This tool only runs on Linux"
" (detected OS: %s)." % osname)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment