Commit e687ec01 authored by Michael Hanselmann

PEP8 style fixes

Identified using the “pep8” utility.
Signed-off-by: Michael Hanselmann <hansmi@google.com>
Reviewed-by: Iustin Pop <iustin@google.com>
parent b7bf8b58
......@@ -128,7 +128,7 @@ class StatusFile:
@param port: TCP/UDP port
"""
assert isinstance(port, (int, long)) and 0 < port < 2**16
assert isinstance(port, (int, long)) and 0 < port < (2 ** 16)
self._data.listen_port = port
def GetListenPort(self):
......
......@@ -34,6 +34,7 @@ except ImportError:
from ganeti import daemon
from ganeti import errors
# We contributed the AsyncNotifier class back to python-pyinotify, and it's
# part of their codebase since version 0.8.7. This code can be removed once
# we'll be ready to depend on python-pyinotify >= 0.8.7
......
......@@ -730,7 +730,7 @@ def GetVolumeList(vg_names):
# we don't want to report such volumes as existing, since they
# don't really hold data
continue
lvs[vg_name+"/"+name] = (size, inactive, online)
lvs[vg_name + "/" + name] = (size, inactive, online)
return lvs
......@@ -2359,7 +2359,7 @@ def FinalizeExport(instance, snap_disks):
config.set(constants.INISECT_INS, "nic%d_%s" % (nic_count, param),
"%s" % nic.nicparams.get(param, None))
# TODO: redundant: on load can read nics until it doesn't exist
config.set(constants.INISECT_INS, "nic_count" , "%d" % nic_total)
config.set(constants.INISECT_INS, "nic_count", "%d" % nic_total)
disk_total = 0
for disk_count, disk in enumerate(snap_disks):
......@@ -2372,7 +2372,7 @@ def FinalizeExport(instance, snap_disks):
config.set(constants.INISECT_INS, "disk%d_size" % disk_count,
("%d" % disk.size))
config.set(constants.INISECT_INS, "disk_count" , "%d" % disk_total)
config.set(constants.INISECT_INS, "disk_count", "%d" % disk_total)
# New-style hypervisor/backend parameters
......@@ -3378,7 +3378,6 @@ class HooksRunner(object):
else:
_Fail("Unknown hooks phase '%s'", phase)
subdir = "%s-%s.d" % (hpath, suffix)
dir_name = utils.PathJoin(self._BASE_DIR, subdir)
......
......@@ -321,7 +321,6 @@ class BlockDev(object):
is_degraded=is_degraded,
ldisk_status=ldisk_status)
def SetInfo(self, text):
"""Update metadata with info text.
......@@ -408,12 +407,12 @@ class LogicalVolume(BlockDev):
pvs_info.sort()
pvs_info.reverse()
pvlist = [ pv[1] for pv in pvs_info ]
pvlist = [pv[1] for pv in pvs_info]
if compat.any(":" in v for v in pvlist):
_ThrowError("Some of your PVs have the invalid character ':' in their"
" name, this is not supported - please filter them out"
" in lvm.conf using either 'filter' or 'preferred_names'")
free_size = sum([ pv[0] for pv in pvs_info ])
free_size = sum([pv[0] for pv in pvs_info])
current_pvs = len(pvlist)
stripes = min(current_pvs, constants.LVM_STRIPECOUNT)
......@@ -1113,7 +1112,7 @@ class DRBD8(BaseDRBD):
super(DRBD8, self).__init__(unique_id, children, size)
self.major = self._DRBD_MAJOR
version = self._GetVersion(self._GetProcData())
if version["k_major"] != 8 :
if version["k_major"] != 8:
_ThrowError("Mismatch in DRBD kernel version and requested ganeti"
" usage: kernel is %s.%s, ganeti wants 8.x",
version["k_major"], version["k_minor"])
......@@ -1194,7 +1193,7 @@ class DRBD8(BaseDRBD):
# this also converts the value to an int
number = pyp.Word(pyp.nums).setParseAction(lambda s, l, t: int(t[0]))
comment = pyp.Literal ("#") + pyp.Optional(pyp.restOfLine)
comment = pyp.Literal("#") + pyp.Optional(pyp.restOfLine)
defa = pyp.Literal("_is_default").suppress()
dbl_quote = pyp.Literal('"').suppress()
......
......@@ -270,7 +270,6 @@ def InitCluster(cluster_name, mac_prefix, # pylint: disable-msg=R0913
" entries: %s" % invalid_hvs,
errors.ECODE_INVAL)
ipcls = None
if primary_ip_version == constants.IP4_VERSION:
ipcls = netutils.IP4Address
......@@ -661,6 +660,7 @@ def MasterFailover(no_voting=False):
master_ip = sstore.GetMasterIP()
total_timeout = 30
# Here we have a phase where no master should be running
def _check_ip():
if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT):
......
......@@ -728,9 +728,9 @@ BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
type="keyval", default={},
help="Backend parameters")
HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
default={}, dest="hvparams",
help="Hypervisor parameters")
HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
default={}, dest="hvparams",
help="Hypervisor parameters")
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
help="Hypervisor and hypervisor options, in the"
......@@ -2815,7 +2815,7 @@ def FormatTimestamp(ts):
@return: a string with the formatted timestamp
"""
if not isinstance (ts, (tuple, list)) or len(ts) != 2:
if not isinstance(ts, (tuple, list)) or len(ts) != 2:
return "?"
sec, usec = ts
return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
......
......@@ -113,9 +113,9 @@ def GenericOpCodes(opts, args):
t3 = time.time()
ToStdout("C:op %4d" % op_cnt)
ToStdout("C:job %4d" % job_cnt)
ToStdout("T:submit %4.4f" % (t2-t1))
ToStdout("T:exec %4.4f" % (t3-t2))
ToStdout("T:total %4.4f" % (t3-t1))
ToStdout("T:submit %4.4f" % (t2 - t1))
ToStdout("T:exec %4.4f" % (t3 - t2))
ToStdout("T:total %4.4f" % (t3 - t1))
return 0
......@@ -689,5 +689,6 @@ aliases = {
"allocator": "iallocator",
}
def Main():
return GenericMain(commands, aliases=aliases)
......@@ -1119,13 +1119,13 @@ def _FormatList(buf, data, indent_level):
if isinstance(elem, tuple)] or [0])
for elem in data:
if isinstance(elem, basestring):
buf.write("%*s%s\n" % (2*indent_level, "", elem))
buf.write("%*s%s\n" % (2 * indent_level, "", elem))
elif isinstance(elem, tuple):
key, value = elem
spacer = "%*s" % (max_tlen - len(key), "")
buf.write("%*s%s:%s %s\n" % (2*indent_level, "", key, spacer, value))
buf.write("%*s%s:%s %s\n" % (2 * indent_level, "", key, spacer, value))
elif isinstance(elem, list):
_FormatList(buf, elem, indent_level+1)
_FormatList(buf, elem, indent_level + 1)
def ShowInstanceConfig(opts, args):
......@@ -1179,7 +1179,7 @@ def ShowInstanceConfig(opts, args):
buf.write(" Operating system: %s\n" % instance["os"])
FormatParameterDict(buf, instance["os_instance"], instance["os_actual"],
level=2)
if instance.has_key("network_port"):
if "network_port" in instance:
buf.write(" Allocated network port: %s\n" %
compat.TryToRoman(instance["network_port"],
convert=opts.roman_integers))
......
......@@ -4085,6 +4085,7 @@ class LUOobCommand(NoHooksLU):
raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
utils.CommaJoin(errs))
class _OsQuery(_QueryBase):
FIELDS = query.OS_FIELDS
......@@ -7950,7 +7951,7 @@ def _ComputeDiskSizePerVG(disk_template, disks):
if disk_template not in req_size_dict:
raise errors.ProgrammerError("Disk template '%s' size requirement"
" is unknown" % disk_template)
" is unknown" % disk_template)
return req_size_dict[disk_template]
......@@ -7972,7 +7973,7 @@ def _ComputeDiskSize(disk_template, disks):
if disk_template not in req_size_dict:
raise errors.ProgrammerError("Disk template '%s' size requirement"
" is unknown" % disk_template)
" is unknown" % disk_template)
return req_size_dict[disk_template]
......@@ -8886,7 +8887,7 @@ class LUInstanceCreate(LogicalUnit):
# 'fake' LV disks with the old data, plus the new unique_id
tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
rename_to = []
for t_dsk, a_dsk in zip (tmp_disks, self.disks):
for t_dsk, a_dsk in zip(tmp_disks, self.disks):
rename_to.append(t_dsk.logical_id)
t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
self.cfg.SetDiskID(t_dsk, pnode_name)
......@@ -10826,7 +10827,7 @@ class LUInstanceSetParams(LogicalUnit):
if msg:
# Assume the primary node is unreachable and go ahead
self.warn.append("Can't get info from primary node %s: %s" %
(pnode, msg))
(pnode, msg))
elif not isinstance(pninfo.payload.get("memory_free", None), int):
self.warn.append("Node data from primary node %s doesn't contain"
" free memory information" % pnode)
......@@ -12143,7 +12144,6 @@ class LUGroupSetParams(LogicalUnit):
return result
class LUGroupRemove(LogicalUnit):
HPATH = "group-remove"
HTYPE = constants.HTYPE_GROUP
......
......@@ -67,6 +67,7 @@ def _all(seq):
return False
return True
def _any(seq):
"""Returns True if any element of the iterable are True.
......@@ -75,6 +76,7 @@ def _any(seq):
return True
return False
try:
# pylint: disable-msg=E0601
# pylint: disable-msg=W0622
......@@ -89,6 +91,7 @@ try:
except NameError:
any = _any
def partition(seq, pred=bool): # pylint: disable-msg=W0622
"""Partition a list in two, based on the given predicate.
......
......@@ -50,4 +50,3 @@ def UnpackMagic(payload):
raise errors.ConfdMagicError("UDP payload contains an unkown fourcc")
return payload[_FOURCC_LEN:]
......@@ -333,7 +333,7 @@ class ConfdClient:
elif peer_cnt < 5:
return peer_cnt - 1
else:
return int(peer_cnt/2) + 1
return int(peer_cnt / 2) + 1
def WaitForReply(self, salt, timeout=constants.CONFD_CLIENT_EXPIRE_TIMEOUT):
"""Wait for replies to a given request.
......
......@@ -539,7 +539,6 @@ class ConfigWriter:
cluster.SimpleFillND(nodegroup.ndparams),
constants.NDS_PARAMETER_TYPES)
# drbd minors check
_, duplicates = self._UnlockedComputeDRBDMap()
for node, minor, instance_a, instance_b in duplicates:
......
......@@ -102,7 +102,7 @@ NODED_GROUP = _autoconf.NODED_GROUP
# Wipe
DD_CMD = "dd"
WIPE_BLOCK_SIZE = 1024**2
WIPE_BLOCK_SIZE = 1024 ** 2
MAX_WIPE_CHUNK = 1024 # 1GB
MIN_WIPE_CHUNK_PERCENT = 10
......@@ -125,7 +125,11 @@ IMPORT_EXPORT_DIR_MODE = 0755
ADOPTABLE_BLOCKDEV_ROOT = "/dev/disk/"
# keep RUN_GANETI_DIR first here, to make sure all get created when the node
# daemon is started (this takes care of RUN_DIR being tmpfs)
SUB_RUN_DIRS = [ RUN_GANETI_DIR, BDEV_CACHE_DIR, DISK_LINKS_DIR ]
SUB_RUN_DIRS = [
RUN_GANETI_DIR,
BDEV_CACHE_DIR,
DISK_LINKS_DIR,
]
LOCK_DIR = _autoconf.LOCALSTATEDIR + "/lock"
SSCONF_LOCK_FILE = LOCK_DIR + "/ganeti-ssconf.lock"
# User-id pool lock directory
......@@ -1209,7 +1213,7 @@ RS_ALL = frozenset([
#: Dictionary with special field cases and their verbose/terse formatting
RSS_DESCRIPTION = {
RS_UNKNOWN: ("(unknown)", "??"),
RS_NODATA: ("(nodata)", "?"),
RS_NODATA: ("(nodata)", "?"),
RS_OFFLINE: ("(offline)", "*"),
RS_UNAVAIL: ("(unavail)", "-"),
}
......@@ -1440,7 +1444,7 @@ MAX_UDP_DATA_SIZE = 61440
# User-id pool minimum/maximum acceptable user-ids.
UIDPOOL_UID_MIN = 0
UIDPOOL_UID_MAX = 2**32-1 # Assuming 32 bit user-ids
UIDPOOL_UID_MAX = 2 ** 32 - 1 # Assuming 32 bit user-ids
# Name or path of the pgrep command
PGREP = "pgrep"
......
......@@ -482,4 +482,4 @@ def MaybeRaise(result):
error = GetEncodedError(result)
if error:
(errcls, args) = error
raise errcls, args
raise errcls(args)
......@@ -319,7 +319,7 @@ class HttpClientPool:
"""
try:
pclient = self._pool.pop(identity)
pclient = self._pool.pop(identity)
except KeyError:
# Need to create new client
client = self._GetHttpClientCreator()(self._curl_config_fn)
......
......@@ -386,11 +386,11 @@ class BaseHypervisor(object):
key = splitfields[0].strip()
val = splitfields[1].strip()
if key == "MemTotal":
result["memory_total"] = int(val.split()[0])/1024
result["memory_total"] = int(val.split()[0]) / 1024
elif key in ("MemFree", "Buffers", "Cached"):
sum_free += int(val.split()[0])/1024
sum_free += int(val.split()[0]) / 1024
elif key == "Active":
result["memory_dom0"] = int(val.split()[0])/1024
result["memory_dom0"] = int(val.split()[0]) / 1024
except (ValueError, TypeError), err:
raise errors.HypervisorError("Failed to compute memory usage: %s" %
(err,))
......
......@@ -267,7 +267,7 @@ class KVMHypervisor(hv_base.BaseHypervisor):
arg_list = cmdline.split("\x00")
while arg_list:
arg = arg_list.pop(0)
arg = arg_list.pop(0)
if arg == "-name":
instance = arg_list.pop(0)
elif arg == "-m":
......@@ -526,7 +526,7 @@ class KVMHypervisor(hv_base.BaseHypervisor):
"""
_, v_major, v_min, _ = self._GetKVMVersion()
pidfile = self._InstancePidFile(instance.name)
pidfile = self._InstancePidFile(instance.name)
kvm = constants.KVM_PATH
kvm_cmd = [kvm]
# used just by the vnc server, if enabled
......
......@@ -163,7 +163,7 @@ class XenHypervisor(hv_base.BaseHypervisor):
@return: tuple (name, id, memory, vcpus, stat, times)
"""
xm_list = self._GetXMList(instance_name=="Domain-0")
xm_list = self._GetXMList(instance_name == "Domain-0")
result = None
for data in xm_list:
if data[0] == instance_name:
......
......@@ -58,7 +58,7 @@ _IP_FAMILY_RE = re.compile(r"(?P<family>inet6?)\s+(?P<ip>%s)/" % _IP_RE_TEXT,
# Dict used to convert from a string representing an IP family to an IP
# version
_NAME_TO_IP_VER = {
_NAME_TO_IP_VER = {
"inet": constants.IP4_VERSION,
"inet6": constants.IP6_VERSION,
}
......@@ -417,9 +417,9 @@ class IPAddress(object):
assert 0 <= prefix <= cls.iplen
target_int = cls._GetIPIntFromString(subnet[0])
# Convert prefix netmask to integer value of netmask
netmask_int = (2**cls.iplen)-1 ^ ((2**cls.iplen)-1 >> prefix)
netmask_int = (2 ** cls.iplen) - 1 ^ ((2 ** cls.iplen) - 1 >> prefix)
# Calculate hostmask
hostmask_int = netmask_int ^ (2**cls.iplen)-1
hostmask_int = netmask_int ^ (2 ** cls.iplen) - 1
# Calculate network address by and'ing netmask
network_int = target_int & netmask_int
# Calculate broadcast address by or'ing hostmask
......
......@@ -662,7 +662,7 @@ class Disk(ConfigObject):
"""
if self.dev_type == constants.LD_LV:
val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
elif self.dev_type in constants.LDS_DRBD:
node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
val = "<DRBD8("
......@@ -789,7 +789,9 @@ class Instance(TaggableObject):
node = self.primary_node
if lvmap is None:
lvmap = { node : [] }
lvmap = {
node: [],
}
ret = lvmap
else:
if not node in lvmap:
......@@ -801,7 +803,7 @@ class Instance(TaggableObject):
for dev in devs:
if dev.dev_type == constants.LD_LV:
lvmap[node].append(dev.logical_id[0]+"/"+dev.logical_id[1])
lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])
elif dev.dev_type in constants.LDS_DRBD:
if dev.children:
......
......@@ -1536,6 +1536,7 @@ class OpTagsDel(OpCode):
("name", ht.NoDefault, ht.TMaybeString, None),
]
# Test opcodes
class OpTestDelay(OpCode):
"""Sleeps for a configured amount of time.
......
......@@ -53,7 +53,10 @@ def BuildUriList(ids, uri_format, uri_fields=("name", "uri")):
(field_id, field_uri) = uri_fields
def _MapId(m_id):
return { field_id: m_id, field_uri: uri_format % m_id, }
return {
field_id: m_id,
field_uri: uri_format % m_id,
}
# Make sure the result is sorted, makes it nicer to look at and simplifies
# unittests.
......
......@@ -664,7 +664,6 @@ def _ParseModifyGroupRequest(name, data):
})
class R_2_groups_name_modify(baserlib.R_Generic):
"""/2/groups/[group_name]/modify resource.
......
......@@ -187,4 +187,3 @@ def GetEnts(resolver=GetentResolver):
_priv_lock.release()
return _priv
......@@ -82,7 +82,7 @@ class ConfdAsyncUDPServer(daemon.AsyncUDPSocket):
logging.debug(err)
return
answer = self.processor.ExecQuery(query, ip, port)
answer = self.processor.ExecQuery(query, ip, port)
if answer is not None:
try:
self.enqueue_send(ip, port, confd.PackMagic(answer))
......
......@@ -113,6 +113,7 @@ class MasterClientHandler(daemon.AsyncTerminatedMessageStream):
"""
_MAX_UNHANDLED = 1
def __init__(self, server, connected_socket, client_address, family):
daemon.AsyncTerminatedMessageStream.__init__(self, connected_socket,
client_address,
......
......@@ -711,7 +711,6 @@ class NodeHttpServer(http.server.HttpServer):
"""
return backend.DemoteFromMC()
@staticmethod
def perspective_node_powercycle(params):
"""Tries to powercycle the node.
......@@ -720,7 +719,6 @@ class NodeHttpServer(http.server.HttpServer):
hypervisor_type = params[0]
return backend.PowercycleNode(hypervisor_type)
# cluster --------------------------
@staticmethod
......
......@@ -511,7 +511,7 @@ def TailFile(fname, lines=20):
try:
fd.seek(0, 2)
pos = fd.tell()
pos = max(0, pos-4096)
pos = max(0, pos - 4096)
fd.seek(pos, 0)
raw_data = fd.read()
finally:
......
......@@ -84,7 +84,6 @@ class RunResult(object):
__slots__ = ["exit_code", "signal", "stdout", "stderr",
"failed", "fail_reason", "cmd"]
def __init__(self, exit_code, signal_, stdout, stderr, cmd, timeout_action,
timeout):
self.cmd = cmd
......
......@@ -206,6 +206,7 @@ def SimpleRetry(expected, fn, delay, timeout, args=None, wait_fn=time.sleep,
"""
rdict = {}
def helper(*innerargs):
# pylint: disable-msg=W0142
result = rdict["result"] = fn(*innerargs)
......
......@@ -338,7 +338,7 @@ def SafeEncode(text):
resu = ""
for char in text:
c = ord(char)
if char == "\t":
if char == "\t":
resu += r"\t"
elif char == "\n":
resu += r"\n"
......
......@@ -131,9 +131,9 @@ class NodeMaintenance(object):
"""
my_name = netutils.Hostname.GetSysName()
req = confd.client.ConfdClientRequest(type=
constants.CONFD_REQ_NODE_ROLE_BYNAME,
query=my_name)
req = \
confd.client.ConfdClientRequest(type=constants.CONFD_REQ_NODE_ROLE_BYNAME,
query=my_name)
self.confd_client.SendRequest(req, async=False, coverage=-1)
timed_out, _, _ = self.confd_client.WaitForReply(req.rsalt)
if not timed_out:
......
......@@ -51,6 +51,7 @@ LOG_HEADERS = {
2: ""
}
class InstanceDown(Exception):
"""The checked instance was not up"""
......@@ -73,8 +74,8 @@ def Log(msg, *args, **kwargs):
"""
if args:
msg = msg % args
indent = kwargs.get('indent', 0)
sys.stdout.write("%*s%s%s\n" % (2*indent, "",
indent = kwargs.get("indent", 0)
sys.stdout.write("%*s%s%s\n" % (2 * indent, "",
LOG_HEADERS.get(indent, " "), msg))
sys.stdout.flush()
......@@ -564,8 +565,8 @@ class Burner(object):
Log(msg, indent=2)
op = opcodes.OpInstanceCreate(instance_name=instance,
disks = [ {"size": size}
for size in self.disk_size],
disks=[{"size": size}
for size in self.disk_size],
disk_template=self.opts.disk_template,
nics=self.opts.nics,
mode=constants.INSTANCE_CREATE,
......@@ -719,8 +720,8 @@ class Burner(object):
ignore_failures=True)
imp_dir = utils.PathJoin(constants.EXPORT_DIR, full_name)
imp_op = opcodes.OpInstanceCreate(instance_name=instance,
disks = [ {"size": size}
for size in self.disk_size],
disks=[{"size": size}
for size in self.disk_size],
disk_template=self.opts.disk_template,
nics=self.opts.nics,
mode=constants.INSTANCE_IMPORT,
......@@ -996,7 +997,7 @@ class Burner(object):
if opts.do_replace1 and opts.disk_template in constants.DTS_INT_MIRROR:
self.BurnReplaceDisks1D8()
if (opts.do_replace2 and len(self.nodes) > 2 and
opts.disk_template in constants.DTS_INT_MIRROR) :
opts.disk_template in constants.DTS_INT_MIRROR):
self.BurnReplaceDisks2()
if (opts.disk_template in constants.DTS_GROWABLE and
......
......@@ -134,6 +134,7 @@ class Merger(object):
constants.INSTST_RUNNING,
constants.INSTST_ERRORUP,
])
def __init__(self, clusters, pause_period, groups, restart, params,
stop_instances):
"""Initialize object with sane defaults and infos required.
......@@ -160,7 +161,6 @@ class Merger(object):
if self.restart == _RESTART_UP:
raise NotImplementedError
def Setup(self):
"""Sets up our end so we can do the merger.
......
......@@ -338,7 +338,7 @@ def RunRemoteCommand(connection, command, logfile):
WriteLog("SUCCESS: command output follows", logfile)
for line in output.split("\n"):
WriteLog("output = %s" %line, logfile)
WriteLog("output = %s" % line, logfile)
WriteLog("command execution completed", logfile)
session.close()
......
......@@ -85,7 +85,8 @@ PART_RE = re.compile("^((?:h|s|m|ub)d[a-z]{1,2})[0-9]+$")
#: Minimum partition size to be considered (1 GB)
PART_MINSIZE = 1024 * 1024 * 1024
MBR_MAX_SIZE = 2 * 10**12
MBR_MAX_SIZE = 2 * (10 ** 12)
class Error(Exception):
"""Generic exception"""
......@@ -192,7 +193,6 @@ def ParseOptions():
action="store_true", default=False,
help="use sfdisk instead of parted")
options, args = parser.parse_args()
if len(args) != 1:
Usage()
......@@ -232,7 +232,7 @@ def SysfsName(disk):
match = PART_RE.match(disk)