Commit 5ae4945a authored by Iustin Pop

Bump pep8 version to 1.2

Debian Wheezy will ship with this version, and it has many improved checks compared to 0.6, so let's:

- bump version in the docs
- silence some new checks that are wrong due to our indent=2 instead of 4
- fix lots of errors in the code where the indentation was wrong by 1
  or 2 spaces
- fix a few cases of == True, False, None and replace with 'is'
- re-indent some cases where the code is OK, but pep8 complains
Signed-off-by: Iustin Pop <iustin@google.com>
Reviewed-by: René Nussbaumer <rn@google.com>
parent 1df4d430
......@@ -1308,9 +1308,19 @@ hs-check: htools/test htools/hpc-htools $(HS_BUILT_TEST_HELPERS)
HBINARY="./htools/hpc-htools" ./htools/offline-test.sh
# E111: indentation is not a multiple of four
# E121: continuation line indentation is not a multiple of four
# (since our indent level is not 4)
# E125: continuation line does not distinguish itself from next logical line
# (since our indent level is not 4)
# E127: continuation line over-indented for visual indent
# (since our indent level is not 4)
# note: do NOT add E128 here; it's a valid style error in most cases!
# I've seen real errors, but also some cases where we indent wrongly
# due to line length; try to rework the cases where it is triggered,
# instead of silencing it
# E261: at least two spaces before inline comment
# E501: line too long (80 characters)
PEP8_IGNORE = E111,E261,E501
PEP8_IGNORE = E111,E121,E125,E127,E261,E501
# For excluding pep8 expects filenames only, not whole paths
PEP8_EXCLUDE = $(subst $(space),$(comma),$(strip $(notdir $(BUILT_PYTHON_SOURCES))))
......
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
# Copyright (C) 2009, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
......@@ -108,9 +108,9 @@ def WritePreamble(sw):
sw.Write("}")
for (fnname, paths) in [
("os", constants.OS_SEARCH_PATH),
("iallocator", constants.IALLOCATOR_SEARCH_PATH),
]:
("os", constants.OS_SEARCH_PATH),
("iallocator", constants.IALLOCATOR_SEARCH_PATH),
]:
sw.Write("_ganeti_%s() {", fnname)
sw.IncIndent()
try:
......
......@@ -35,7 +35,7 @@ must be used::
The same with pep8, other versions may give you errors::
$ pep8 --version
0.6.1
1.2
To generate unittest coverage reports (``make coverage``), `coverage
<http://pypi.python.org/pypi/coverage>`_ needs to be installed.
......@@ -49,7 +49,7 @@ Installation of all dependencies listed here::
logilab-astng==0.20.1 \
logilab-common==0.50.3 \
pylint==0.21.1 \
pep8==0.6.1 \
pep8==1.2 \
coverage
For Haskell development, again all things from the quick install
......
......@@ -252,7 +252,7 @@ def GetMasterInfo():
except errors.ConfigurationError, err:
_Fail("Cluster configuration incomplete: %s", err, exc=True)
return (master_netdev, master_ip, master_node, primary_ip_family,
master_netmask)
master_netmask)
def RunLocalHooks(hook_opcode, hooks_path, env_builder_fn):
......@@ -698,7 +698,7 @@ def VerifyNode(what, cluster_name):
else:
source = None
result[constants.NV_MASTERIP] = netutils.TcpPing(master_ip, port,
source=source)
source=source)
if constants.NV_USERSCRIPTS in what:
result[constants.NV_USERSCRIPTS] = \
......@@ -3602,7 +3602,7 @@ class HooksRunner(object):
runparts_results = utils.RunParts(dir_name, env=env, reset_env=True)
for (relname, relstatus, runresult) in runparts_results:
for (relname, relstatus, runresult) in runparts_results:
if relstatus == constants.RUNPARTS_SKIP:
rrval = constants.HKR_SKIP
output = ""
......
......@@ -990,12 +990,13 @@ class BaseDRBD(BlockDev): # pylint: disable=W0223
first_line)
values = version.groups()
retval = {"k_major": int(values[0]),
"k_minor": int(values[1]),
"k_point": int(values[2]),
"api": int(values[3]),
"proto": int(values[4]),
}
retval = {
"k_major": int(values[0]),
"k_minor": int(values[1]),
"k_point": int(values[2]),
"api": int(values[3]),
"proto": int(values[4]),
}
if values[5] is not None:
retval["proto2"] = values[5]
......@@ -1393,7 +1394,7 @@ class DRBD8(BaseDRBD):
@classmethod
def _ComputeDiskBarrierArgs(cls, vmaj, vmin, vrel, disabled_barriers,
disable_meta_flush):
disable_meta_flush):
"""Compute the DRBD command line parameters for disk barriers
Returns a list of the disk barrier parameters as requested via the
......@@ -1627,7 +1628,7 @@ class DRBD8(BaseDRBD):
"--c-delay-target", params[constants.LDP_DELAY_TARGET],
"--c-max-rate", params[constants.LDP_MAX_RATE],
"--c-min-rate", params[constants.LDP_MIN_RATE],
])
])
else:
args.extend(["-r", "%d" % params[constants.LDP_RESYNC_RATE]])
......
......@@ -779,7 +779,7 @@ def MasterFailover(no_voting=False):
msg = result.fail_msg
if msg:
logging.error("Could not disable the master role on the old master"
" %s, please disable manually: %s", old_master, msg)
" %s, please disable manually: %s", old_master, msg)
logging.info("Checking master IP non-reachability...")
......
......@@ -788,18 +788,19 @@ IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
metavar="<NAME>",
help="Set the default instance allocator plugin",
default=None, type="string",
completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
metavar="<NAME>",
help="Set the default instance"
" allocator plugin",
default=None, type="string",
completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
metavar="<os>",
completion_suggest=OPT_COMPL_ONE_OS)
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
type="keyval", default={},
help="OS parameters")
type="keyval", default={},
help="OS parameters")
FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
action="store_true", default=False,
......@@ -848,7 +849,7 @@ SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
type="keyval", default={},
help="Disk size specs: list of key=value,"
" where key is one of min, max, std"
" where key is one of min, max, std"
" (in MB or using a unit)")
SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
......@@ -857,10 +858,10 @@ SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
" where key is one of min, max, std")
IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
dest="ipolicy_disk_templates",
type="list", default=None,
help="Comma-separated list of"
" enabled disk templates")
dest="ipolicy_disk_templates",
type="list", default=None,
help="Comma-separated list of"
" enabled disk templates")
IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
dest="ipolicy_vcpu_ratio",
......@@ -1087,12 +1088,12 @@ DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
" (excluded from allocation operations)"))
CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
type="bool", default=None, metavar=_YORNO,
help="Set the master_capable flag on the node")
type="bool", default=None, metavar=_YORNO,
help="Set the master_capable flag on the node")
CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
type="bool", default=None, metavar=_YORNO,
help="Set the vm_capable flag on the node")
type="bool", default=None, metavar=_YORNO,
help="Set the vm_capable flag on the node")
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
type="bool", default=None, metavar=_YORNO,
......@@ -1149,11 +1150,12 @@ MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
default=None)
USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
dest="use_external_mip_script",
help="Specify whether to run a user-provided"
" script for the master IP address turnup and"
" turndown operations",
type="bool", metavar=_YORNO, default=None)
dest="use_external_mip_script",
help="Specify whether to run a"
" user-provided script for the master"
" IP address turnup and"
" turndown operations",
type="bool", metavar=_YORNO, default=None)
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
help="Specify the default directory (cluster-"
......@@ -1162,14 +1164,13 @@ GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
metavar="DIR",
default=constants.DEFAULT_FILE_STORAGE_DIR)
GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
dest="shared_file_storage_dir",
help="Specify the default directory (cluster-"
"wide) for storing the shared file-based"
" disks [%s]" %
constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
metavar="SHAREDDIR",
default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
GLOBAL_SHARED_FILEDIR_OPT = cli_option(
"--shared-file-storage-dir",
dest="shared_file_storage_dir",
help="Specify the default directory (cluster-wide) for storing the"
" shared file-based disks [%s]" %
constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
metavar="SHAREDDIR", default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
help="Don't modify /etc/hosts",
......@@ -1207,9 +1208,10 @@ TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
help="Maximum time to wait")
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
dest="shutdown_timeout", type="int",
default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
help="Maximum time to wait for instance shutdown")
dest="shutdown_timeout", type="int",
default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
help="Maximum time to wait for instance"
" shutdown")
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
default=None,
......@@ -1237,19 +1239,19 @@ NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
" certificate"))
SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
default=None,
help="File containing new SPICE certificate")
default=None,
help="File containing new SPICE certificate")
SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
default=None,
help="File containing the certificate of the CA"
" which signed the SPICE certificate")
default=None,
help="File containing the certificate of the CA"
" which signed the SPICE certificate")
NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
dest="new_spice_cert", default=None,
action="store_true",
help=("Generate a new self-signed SPICE"
" certificate"))
dest="new_spice_cert", default=None,
action="store_true",
help=("Generate a new self-signed SPICE"
" certificate"))
NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
dest="new_confd_hmac_key",
......@@ -1307,10 +1309,10 @@ REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
" removed from the user-id pool"))
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
action="store", dest="reserved_lvs",
help=("A comma-separated list of reserved"
" logical volumes names, that will be"
" ignored by cluster verify"))
action="store", dest="reserved_lvs",
help=("A comma-separated list of reserved"
" logical volumes names, that will be"
" ignored by cluster verify"))
ROMAN_OPT = cli_option("--roman",
dest="roman_integers", default=False,
......@@ -1365,8 +1367,8 @@ NODE_POWERED_OPT = cli_option("--node-powered", default=None,
help="Specify if the SoR for node is powered")
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
default=constants.OOB_TIMEOUT,
help="Maximum time to wait for out-of-band helper")
default=constants.OOB_TIMEOUT,
help="Maximum time to wait for out-of-band helper")
POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
default=constants.OOB_POWER_DELAY,
......@@ -2143,7 +2145,7 @@ def FormatError(err):
elif isinstance(err, errors.OpPrereqError):
if len(err.args) == 2:
obuf.write("Failure: prerequisites not met for this"
" operation:\nerror type: %s, error details:\n%s" %
" operation:\nerror type: %s, error details:\n%s" %
(err.args[1], err.args[0]))
else:
obuf.write("Failure: prerequisites not met for this"
......
......@@ -49,8 +49,8 @@ ON_OPT = cli_option("--on", default=False,
help="Recover from an EPO")
GROUPS_OPT = cli_option("--groups", default=False,
action="store_true", dest="groups",
help="Arguments are node groups instead of nodes")
action="store_true", dest="groups",
help="Arguments are node groups instead of nodes")
SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
action="store_true",
......@@ -787,7 +787,7 @@ def _ReadAndVerifyCert(cert_filename, verify_private_key=False):
return pem
def _RenewCrypto(new_cluster_cert, new_rapi_cert, #pylint: disable=R0911
def _RenewCrypto(new_cluster_cert, new_rapi_cert, # pylint: disable=R0911
rapi_cert_filename, new_spice_cert, spice_cert_filename,
spice_cacert_filename, new_confd_hmac_key, new_cds,
cds_filename, force):
......
......@@ -116,7 +116,7 @@ def _ExpandMultiNames(mode, names, client=None):
if not names:
raise errors.OpPrereqError("No node names passed", errors.ECODE_INVAL)
ndata = client.QueryNodes(names, ["name", "pinst_list", "sinst_list"],
False)
False)
ipri = [row[1] for row in ndata]
pri_names = list(itertools.chain(*ipri))
......
......@@ -92,7 +92,8 @@ def ListJobs(opts, args):
"summary": (lambda value: ",".join(str(item) for item in value), False),
}
fmtoverride.update(dict.fromkeys(["opstart", "opexec", "opend"],
(lambda value: map(FormatTimestamp, value), None)))
(lambda value: map(FormatTimestamp, value),
None)))
qfilter = qlang.MakeSimpleFilter("status", opts.status_filter)
......
......@@ -1208,8 +1208,8 @@ def _ComputeIPolicyInstanceViolation(ipolicy, instance,
disk_sizes, spindle_use)
def _ComputeIPolicyInstanceSpecViolation(ipolicy, instance_spec,
_compute_fn=_ComputeIPolicySpecViolation):
def _ComputeIPolicyInstanceSpecViolation(
ipolicy, instance_spec, _compute_fn=_ComputeIPolicySpecViolation):
"""Compute if instance specs meets the specs of ipolicy.
@type ipolicy: dict
......@@ -1920,10 +1920,11 @@ class LUClusterVerify(NoHooksLU):
# Always depend on global verification
depends_fn = lambda: [(-len(jobs), [])]
jobs.extend([opcodes.OpClusterVerifyGroup(group_name=group,
ignore_errors=self.op.ignore_errors,
depends=depends_fn())]
for group in groups)
jobs.extend(
[opcodes.OpClusterVerifyGroup(group_name=group,
ignore_errors=self.op.ignore_errors,
depends=depends_fn())]
for group in groups)
# Fix up all parameters
for op in itertools.chain(*jobs): # pylint: disable=W0142
......@@ -2645,7 +2646,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
if drbd_helper:
helper_result = nresult.get(constants.NV_DRBDHELPER, None)
test = (helper_result == None)
test = (helper_result is None)
_ErrorIf(test, constants.CV_ENODEDRBDHELPER, node,
"no drbd usermode helper returned")
if helper_result:
......@@ -3572,9 +3573,9 @@ class LUGroupVerifyDisks(NoHooksLU):
res_instances = set()
res_missing = {}
nv_dict = _MapInstanceDisksToNodes([inst
for inst in self.instances.values()
if inst.admin_state == constants.ADMINST_UP])
nv_dict = _MapInstanceDisksToNodes(
[inst for inst in self.instances.values()
if inst.admin_state == constants.ADMINST_UP])
if nv_dict:
nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
......@@ -4330,11 +4331,13 @@ def _ComputeAncillaryFiles(cluster, redist):
files_mc.add(constants.CLUSTER_CONF_FILE)
# Files which should only be on VM-capable nodes
files_vm = set(filename
files_vm = set(
filename
for hv_name in cluster.enabled_hypervisors
for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles()[0])
files_opt |= set(filename
files_opt |= set(
filename
for hv_name in cluster.enabled_hypervisors
for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles()[1])
......@@ -4757,10 +4760,10 @@ class LUOobCommand(NoHooksLU):
type(result.payload))
if self.op.command in [
constants.OOB_POWER_ON,
constants.OOB_POWER_OFF,
constants.OOB_POWER_CYCLE,
]:
constants.OOB_POWER_ON,
constants.OOB_POWER_OFF,
constants.OOB_POWER_CYCLE,
]:
if result.payload is not None:
errs.append("%s is expected to not return payload but got '%s'" %
(self.op.command, result.payload))
......@@ -5636,7 +5639,7 @@ class LUNodeAdd(LogicalUnit):
if not newbie_singlehomed:
# check reachability from my secondary ip to newbie's secondary ip
if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
source=myself.secondary_ip):
source=myself.secondary_ip):
raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
" based ping to node daemon port",
errors.ECODE_ENVIRON)
......@@ -5814,10 +5817,10 @@ class LUNodeSetParams(LogicalUnit):
errors.ECODE_INVAL)
# Boolean value that tells us whether we might be demoting from MC
self.might_demote = (self.op.master_candidate == False or
self.op.offline == True or
self.op.drained == True or
self.op.master_capable == False)
self.might_demote = (self.op.master_candidate is False or
self.op.offline is True or
self.op.drained is True or
self.op.master_capable is False)
if self.op.secondary_ip:
if not netutils.IP4Address.IsValid(self.op.secondary_ip):
......@@ -5918,7 +5921,7 @@ class LUNodeSetParams(LogicalUnit):
" it a master candidate" % node.name,
errors.ECODE_STATE)
if self.op.vm_capable == False:
if self.op.vm_capable is False:
(ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
if ipri or isec:
raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
......@@ -5944,7 +5947,7 @@ class LUNodeSetParams(LogicalUnit):
# Check for ineffective changes
for attr in self._FLAGS:
if (getattr(self.op, attr) == False and getattr(node, attr) == False):
if (getattr(self.op, attr) is False and getattr(node, attr) is False):
self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
setattr(self.op, attr, None)
......@@ -5954,7 +5957,7 @@ class LUNodeSetParams(LogicalUnit):
# TODO: We might query the real power state if it supports OOB
if _SupportsOob(self.cfg, node):
if self.op.offline is False and not (node.powered or
self.op.powered == True):
self.op.powered is True):
raise errors.OpPrereqError(("Node %s needs to be turned on before its"
" offline status can be reset") %
self.op.node_name, errors.ECODE_STATE)
......@@ -5965,14 +5968,14 @@ class LUNodeSetParams(LogicalUnit):
errors.ECODE_STATE)
# If we're being deofflined/drained, we'll MC ourself if needed
if (self.op.drained == False or self.op.offline == False or
if (self.op.drained is False or self.op.offline is False or
(self.op.master_capable and not node.master_capable)):
if _DecideSelfPromotion(self):
self.op.master_candidate = True
self.LogInfo("Auto-promoting node to master candidate")
# If we're no longer master capable, we'll demote ourselves from MC
if self.op.master_capable == False and node.master_candidate:
if self.op.master_capable is False and node.master_candidate:
self.LogInfo("Demoting from master candidate")
self.op.master_candidate = False
......@@ -8279,8 +8282,8 @@ class TLMigrateInstance(Tasklet):
ial.required_nodes), errors.ECODE_FAULT)
self.target_node = ial.result[0]
self.lu.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
self.instance_name, self.lu.op.iallocator,
utils.CommaJoin(ial.result))
self.instance_name, self.lu.op.iallocator,
utils.CommaJoin(ial.result))
def _WaitUntilSync(self):
"""Poll with custom rpc for disk sync.
......@@ -8450,8 +8453,8 @@ class TLMigrateInstance(Tasklet):
# Don't raise an exception here, as we still have to try to revert the
# disk status, even if this step failed.
abort_result = self.rpc.call_instance_finalize_migration_src(source_node,
instance, False, self.live)
abort_result = self.rpc.call_instance_finalize_migration_src(
source_node, instance, False, self.live)
abort_msg = abort_result.fail_msg
if abort_msg:
logging.error("Aborting migration failed on source node %s: %s",
......@@ -8885,10 +8888,11 @@ _DISK_TEMPLATE_DEVICE_TYPE = {
}
def _GenerateDiskTemplate(lu, template_name, instance_name, primary_node,
secondary_nodes, disk_info, file_storage_dir, file_driver, base_index,
feedback_fn, full_disk_params, _req_file_storage=opcodes.RequireFileStorage,
_req_shr_file_storage=opcodes.RequireSharedFileStorage):
def _GenerateDiskTemplate(
lu, template_name, instance_name, primary_node, secondary_nodes,
disk_info, file_storage_dir, file_driver, base_index,
feedback_fn, full_disk_params, _req_file_storage=opcodes.RequireFileStorage,
_req_shr_file_storage=opcodes.RequireSharedFileStorage):
"""Generate the entire disk layout for a given template type.
"""
......@@ -9825,8 +9829,8 @@ class LUInstanceCreate(LogicalUnit):
enabled_hvs = cluster.enabled_hypervisors
if self.op.hypervisor not in enabled_hvs:
raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
" cluster (%s)" % (self.op.hypervisor,
",".join(enabled_hvs)),
" cluster (%s)" %
(self.op.hypervisor, ",".join(enabled_hvs)),
errors.ECODE_STATE)
# Check tag validity
......@@ -10547,9 +10551,10 @@ class LUInstanceReplaceDisks(LogicalUnit):
assert not self.needed_locks[locking.LEVEL_NODE]
# Lock member nodes of all locked groups
self.needed_locks[locking.LEVEL_NODE] = [node_name
for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
for node_name in self.cfg.GetNodeGroup(group_uuid).members]
self.needed_locks[locking.LEVEL_NODE] = \
[node_name
for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
for node_name in self.cfg.GetNodeGroup(group_uuid).members]
else:
self._LockInstancesNodes()
elif level == locking.LEVEL_NODE_RES:
......@@ -12337,12 +12342,10 @@ class LUInstanceSetParams(LogicalUnit):
if self.op.hvparams:
_CheckGlobalHvParams(self.op.hvparams)
self.op.disks = \
self._UpgradeDiskNicMods("disk", self.op.disks,
opcodes.OpInstanceSetParams.TestDiskModifications)
self.op.nics = \
self._UpgradeDiskNicMods("NIC", self.op.nics,
opcodes.OpInstanceSetParams.TestNicModifications)
self.op.disks = self._UpgradeDiskNicMods(
"disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
self.op.nics = self._UpgradeDiskNicMods(
"NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
# Check disk modifications
self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
......@@ -12642,7 +12645,7 @@ class LUInstanceSetParams(LogicalUnit):
" free memory information" % pnode)
elif instance_info.fail_msg:
self.warn.append("Can't get instance runtime information: %s" %
instance_info.fail_msg)
instance_info.fail_msg)
else:
if instance_info.payload:
current_mem = int(instance_info.payload["memory"])
......@@ -12694,7 +12697,8 @@ class LUInstanceSetParams(LogicalUnit):
self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
raise errors.OpPrereqError("Instance %s must have memory between %d"
" and %d MB of memory unless --force is"
" given" % (instance.name,
" given" %
(instance.name,
self.be_proposed[constants.BE_MINMEM],
self.be_proposed[constants.BE_MAXMEM]),
errors.ECODE_INVAL)
......@@ -15202,12 +15206,12 @@ class IAllocator(object):
ht.TItems([ht.TNonEmptyString,
ht.TNonEmptyString,
ht.TListOf(ht.TNonEmptyString),
])))
])))
_NEVAC_FAILED = \
ht.TListOf(ht.TAnd(ht.TIsLength(2),
ht.TItems([ht.TNonEmptyString,
ht.TMaybeString,
])))
])))
_NEVAC_RESULT = ht.TAnd(ht.TIsLength(3),
ht.TItems([_NEVAC_MOVED, _NEVAC_FAILED, _JOB_LIST]))
......
#
#
# Copyright (C) 2009, 2010 Google Inc.
# Copyright (C) 2009, 2010, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
......@@ -283,7 +283,7 @@ class ConfdClient:
server_port=port,
extra_args=rq.args,
client=self,
)
)
self._callback(client_reply)
finally:
......
......@@ -1054,10 +1054,9 @@ class ConfigWriter:
"""
cluster = self._config_data.cluster
result = objects.MasterNetworkParameters(name=cluster.master_node,
ip=cluster.master_ip,
netmask=cluster.master_netmask,
netdev=cluster.master_netdev,
result = objects.MasterNetworkParameters(
name=cluster.master_node, ip=cluster.master_ip,
netmask=cluster.master_netmask, netdev=cluster.master_netdev,
ip_family=cluster.primary_ip_family)
return result
......
......@@ -1816,8 +1816,7 @@ HVC_DEFAULTS = {
HV_CPU_MASK: CPU_PINNING_ALL,
HV_CPU_TYPE: "",
},
HT_FAKE: {
},
HT_FAKE: {},
HT_CHROOT: {
HV_INIT_SCRIPT: "/ganeti-chroot",
},
......@@ -1870,10 +1869,8 @@ DISK_LD_DEFAULTS = {