Commit 5eacbcae authored by Thomas Thrainer
Browse files

cmdlib: Cleanup public/private functions



All functions/classes which are used outside of their defining module
(with tests as an exception) no longer have a leading underscore.
Signed-off-by: Thomas Thrainer <thomasth@google.com>
Reviewed-by: Bernardo Dal Seno <bdalseno@google.com>
parent 8aa8f6b1
......@@ -33,16 +33,16 @@ from ganeti import qlang
from ganeti import query
from ganeti import utils
from ganeti.cmdlib.base import _QueryBase, NoHooksLU, LogicalUnit
from ganeti.cmdlib.common import _GetWantedNodes, _ShareAll, \
_CheckNodeOnline, _ExpandNodeName
from ganeti.cmdlib.instance_storage import _StartInstanceDisks, \
_ShutdownInstanceDisks
from ganeti.cmdlib.instance_utils import _GetClusterDomainSecret, \
_BuildInstanceHookEnvByObject, _CheckNodeNotDrained, _RemoveInstance
from ganeti.cmdlib.base import QueryBase, NoHooksLU, LogicalUnit
from ganeti.cmdlib.common import GetWantedNodes, ShareAll, CheckNodeOnline, \
ExpandNodeName
from ganeti.cmdlib.instance_storage import StartInstanceDisks, \
ShutdownInstanceDisks
from ganeti.cmdlib.instance_utils import GetClusterDomainSecret, \
BuildInstanceHookEnvByObject, CheckNodeNotDrained, RemoveInstance
class _ExportQuery(_QueryBase):
class ExportQuery(QueryBase):
FIELDS = query.EXPORT_FIELDS
#: The node name is not a unique key for this query
......@@ -53,14 +53,14 @@ class _ExportQuery(_QueryBase):
# The following variables interact with _QueryBase._GetNames
if self.names:
self.wanted = _GetWantedNodes(lu, self.names)
self.wanted = GetWantedNodes(lu, self.names)
else:
self.wanted = locking.ALL_SET
self.do_locking = self.use_locking
if self.do_locking:
lu.share_locks = _ShareAll()
lu.share_locks = ShareAll()
lu.needed_locks = {
locking.LEVEL_NODE: self.wanted,
}
......@@ -102,8 +102,8 @@ class LUBackupQuery(NoHooksLU):
REQ_BGL = False
def CheckArguments(self):
self.expq = _ExportQuery(qlang.MakeSimpleFilter("node", self.op.nodes),
["node", "export"], self.op.use_locking)
self.expq = ExportQuery(qlang.MakeSimpleFilter("node", self.op.nodes),
["node", "export"], self.op.use_locking)
def ExpandNames(self):
self.expq.ExpandNames(self)
......@@ -141,9 +141,9 @@ class LUBackupPrepare(NoHooksLU):
self.instance = self.cfg.GetInstanceInfo(instance_name)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
_CheckNodeOnline(self, self.instance.primary_node)
CheckNodeOnline(self, self.instance.primary_node)
self._cds = _GetClusterDomainSecret()
self._cds = GetClusterDomainSecret()
def Exec(self, feedback_fn):
"""Prepares an instance for an export.
......@@ -237,7 +237,7 @@ class LUBackupExport(LogicalUnit):
"REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
}
env.update(_BuildInstanceHookEnvByObject(self, self.instance))
env.update(BuildInstanceHookEnvByObject(self, self.instance))
return env
......@@ -263,7 +263,7 @@ class LUBackupExport(LogicalUnit):
self.instance = self.cfg.GetInstanceInfo(instance_name)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
_CheckNodeOnline(self, self.instance.primary_node)
CheckNodeOnline(self, self.instance.primary_node)
if (self.op.remove_instance and
self.instance.admin_state == constants.ADMINST_UP and
......@@ -272,12 +272,12 @@ class LUBackupExport(LogicalUnit):
" down before", errors.ECODE_STATE)
if self.op.mode == constants.EXPORT_MODE_LOCAL:
self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
self.op.target_node = ExpandNodeName(self.cfg, self.op.target_node)
self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
assert self.dst_node is not None
_CheckNodeOnline(self, self.dst_node.name)
_CheckNodeNotDrained(self, self.dst_node.name)
CheckNodeOnline(self, self.dst_node.name)
CheckNodeNotDrained(self, self.dst_node.name)
self._cds = None
self.dest_disk_info = None
......@@ -293,7 +293,7 @@ class LUBackupExport(LogicalUnit):
len(self.instance.disks)),
errors.ECODE_INVAL)
cds = _GetClusterDomainSecret()
cds = GetClusterDomainSecret()
# Check X509 key name
try:
......@@ -403,7 +403,7 @@ class LUBackupExport(LogicalUnit):
if activate_disks:
# Activate the instance disks if we'exporting a stopped instance
      # Activate the instance disks if we're exporting a stopped instance
_StartInstanceDisks(self, instance, None)
StartInstanceDisks(self, instance, None)
try:
helper = masterd.instance.ExportInstanceHelper(self, feedback_fn,
......@@ -422,7 +422,7 @@ class LUBackupExport(LogicalUnit):
msg = result.fail_msg
if msg:
feedback_fn("Failed to start instance: %s" % msg)
_ShutdownInstanceDisks(self, instance)
ShutdownInstanceDisks(self, instance)
raise errors.OpExecError("Could not start instance: %s" % msg)
if self.op.mode == constants.EXPORT_MODE_LOCAL:
......@@ -451,7 +451,7 @@ class LUBackupExport(LogicalUnit):
finally:
if activate_disks:
feedback_fn("Deactivating disks for %s" % instance.name)
_ShutdownInstanceDisks(self, instance)
ShutdownInstanceDisks(self, instance)
if not (compat.all(dresults) and fin_resu):
failures = []
......@@ -470,8 +470,8 @@ class LUBackupExport(LogicalUnit):
# Remove instance if requested
if self.op.remove_instance:
feedback_fn("Removing instance %s" % instance.name)
_RemoveInstance(self, feedback_fn, instance,
self.op.ignore_remove_failures)
RemoveInstance(self, feedback_fn, instance,
self.op.ignore_remove_failures)
if self.op.mode == constants.EXPORT_MODE_LOCAL:
self._CleanupExports(feedback_fn)
......
......@@ -28,7 +28,7 @@ from ganeti import constants
from ganeti import locking
from ganeti import query
from ganeti import utils
from ganeti.cmdlib.common import _ExpandInstanceName
from ganeti.cmdlib.common import ExpandInstanceName
class ResultWithJobs:
......@@ -319,8 +319,8 @@ class LogicalUnit(object):
else:
assert locking.LEVEL_INSTANCE not in self.needed_locks, \
"_ExpandAndLockInstance called with instance-level locks set"
self.op.instance_name = _ExpandInstanceName(self.cfg,
self.op.instance_name)
self.op.instance_name = ExpandInstanceName(self.cfg,
self.op.instance_name)
self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
def _LockInstancesNodes(self, primary_only=False,
......@@ -444,7 +444,7 @@ class Tasklet:
raise NotImplementedError
class _QueryBase:
class QueryBase:
"""Base for query utility classes.
"""
......
......@@ -49,15 +49,14 @@ from ganeti import uidpool
from ganeti import utils
from ganeti import vcluster
from ganeti.cmdlib.base import NoHooksLU, _QueryBase, LogicalUnit, \
from ganeti.cmdlib.base import NoHooksLU, QueryBase, LogicalUnit, \
ResultWithJobs
from ganeti.cmdlib.common import _ShareAll, _RunPostHook, \
_ComputeAncillaryFiles, _RedistributeAncillaryFiles, _UploadHelper, \
_GetWantedInstances, _MergeAndVerifyHvState, _MergeAndVerifyDiskState, \
_GetUpdatedIPolicy, _ComputeNewInstanceViolations, _GetUpdatedParams, \
_CheckOSParams, _CheckHVParams, _AdjustCandidatePool, _CheckNodePVs, \
_ComputeIPolicyInstanceViolation, _AnnotateDiskParams, \
_SupportsOob
from ganeti.cmdlib.common import ShareAll, RunPostHook, \
ComputeAncillaryFiles, RedistributeAncillaryFiles, UploadHelper, \
GetWantedInstances, MergeAndVerifyHvState, MergeAndVerifyDiskState, \
GetUpdatedIPolicy, ComputeNewInstanceViolations, GetUpdatedParams, \
CheckOSParams, CheckHVParams, AdjustCandidatePool, CheckNodePVs, \
ComputeIPolicyInstanceViolation, AnnotateDiskParams, SupportsOob
import ganeti.masterd.instance
......@@ -99,7 +98,7 @@ class LUClusterConfigQuery(NoHooksLU):
REQ_BGL = False
def CheckArguments(self):
self.cq = _ClusterQuery(None, self.op.output_fields, False)
self.cq = ClusterQuery(None, self.op.output_fields, False)
def ExpandNames(self):
self.cq.ExpandNames(self)
......@@ -164,7 +163,7 @@ class LUClusterDestroy(LogicalUnit):
master_params = self.cfg.GetMasterNetworkParameters()
# Run post hooks on master node before it's removed
_RunPostHook(self, master_params.name)
RunPostHook(self, master_params.name)
ems = self.cfg.GetUseExternalMipScript()
result = self.rpc.call_node_deactivate_master_ip(master_params.name,
......@@ -204,7 +203,7 @@ class LUClusterPostInit(LogicalUnit):
return True
class _ClusterQuery(_QueryBase):
class ClusterQuery(QueryBase):
FIELDS = query.CLUSTER_FIELDS
#: Do not sort (there is only one item)
......@@ -344,14 +343,14 @@ class LUClusterRedistConf(NoHooksLU):
locking.LEVEL_NODE: locking.ALL_SET,
locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
}
self.share_locks = _ShareAll()
self.share_locks = ShareAll()
def Exec(self, feedback_fn):
"""Redistribute the configuration.
"""
self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
_RedistributeAncillaryFiles(self)
RedistributeAncillaryFiles(self)
class LUClusterRename(LogicalUnit):
......@@ -426,7 +425,7 @@ class LUClusterRename(LogicalUnit):
node_list.remove(master_params.name)
except ValueError:
pass
_UploadHelper(self, node_list, pathutils.SSH_KNOWN_HOSTS_FILE)
UploadHelper(self, node_list, pathutils.SSH_KNOWN_HOSTS_FILE)
finally:
master_params.ip = new_ip
result = self.rpc.call_node_activate_master_ip(master_params.name,
......@@ -447,7 +446,7 @@ class LUClusterRepairDiskSizes(NoHooksLU):
def ExpandNames(self):
if self.op.instances:
self.wanted_names = _GetWantedInstances(self, self.op.instances)
self.wanted_names = GetWantedInstances(self, self.op.instances)
# Not getting the node allocation lock as only a specific set of
# instances (and their nodes) is going to be acquired
self.needed_locks = {
......@@ -633,7 +632,7 @@ class LUClusterSetParams(LogicalUnit):
locking.LEVEL_NODEGROUP: locking.ALL_SET,
locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
}
self.share_locks = _ShareAll()
self.share_locks = ShareAll()
def BuildHooksEnv(self):
"""Build hooks env.
......@@ -727,22 +726,22 @@ class LUClusterSetParams(LogicalUnit):
constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]
if self.op.hv_state:
new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
self.cluster.hv_state_static)
new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
self.cluster.hv_state_static)
self.new_hv_state = dict((hv, cluster.SimpleFillHvState(values))
for hv, values in new_hv_state.items())
if self.op.disk_state:
new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state,
self.cluster.disk_state_static)
new_disk_state = MergeAndVerifyDiskState(self.op.disk_state,
self.cluster.disk_state_static)
self.new_disk_state = \
dict((storage, dict((name, cluster.SimpleFillDiskState(values))
for name, values in svalues.items()))
for storage, svalues in new_disk_state.items())
if self.op.ipolicy:
self.new_ipolicy = _GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
group_policy=False)
self.new_ipolicy = GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
group_policy=False)
all_instances = self.cfg.GetAllInstancesInfo().values()
violations = set()
......@@ -752,8 +751,8 @@ class LUClusterSetParams(LogicalUnit):
for node in inst.all_nodes)])
new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
ipol = masterd.instance.CalculateGroupIPolicy(cluster, group)
new = _ComputeNewInstanceViolations(ipol,
new_ipolicy, instances, self.cfg)
new = ComputeNewInstanceViolations(ipol,
new_ipolicy, instances, self.cfg)
if new:
violations.update(new)
......@@ -831,16 +830,16 @@ class LUClusterSetParams(LogicalUnit):
if os_name not in self.new_osp:
self.new_osp[os_name] = {}
self.new_osp[os_name] = _GetUpdatedParams(self.new_osp[os_name], osp,
use_none=True)
self.new_osp[os_name] = GetUpdatedParams(self.new_osp[os_name], osp,
use_none=True)
if not self.new_osp[os_name]:
# we removed all parameters
del self.new_osp[os_name]
else:
# check the parameter validity (remote check)
_CheckOSParams(self, False, [self.cfg.GetMasterNode()],
os_name, self.new_osp[os_name])
CheckOSParams(self, False, [self.cfg.GetMasterNode()],
os_name, self.new_osp[os_name])
# changes to the hypervisor list
if self.op.enabled_hypervisors is not None:
......@@ -868,7 +867,7 @@ class LUClusterSetParams(LogicalUnit):
hv_class = hypervisor.GetHypervisorClass(hv_name)
utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
hv_class.CheckParameterSyntax(hv_params)
_CheckHVParams(self, node_list, hv_name, hv_params)
CheckHVParams(self, node_list, hv_name, hv_params)
self._CheckDiskTemplateConsistency()
......@@ -883,7 +882,7 @@ class LUClusterSetParams(LogicalUnit):
new_osp = objects.FillDict(cluster_defaults, hv_params)
hv_class = hypervisor.GetHypervisorClass(hv_name)
hv_class.CheckParameterSyntax(new_osp)
_CheckHVParams(self, node_list, hv_name, new_osp)
CheckHVParams(self, node_list, hv_name, new_osp)
if self.op.default_iallocator:
alloc_script = utils.FindFile(self.op.default_iallocator,
......@@ -963,7 +962,7 @@ class LUClusterSetParams(LogicalUnit):
if self.op.candidate_pool_size is not None:
self.cluster.candidate_pool_size = self.op.candidate_pool_size
# we need to update the pool size here, otherwise the save will fail
_AdjustCandidatePool(self, [])
AdjustCandidatePool(self, [])
if self.op.maintain_node_health is not None:
if self.op.maintain_node_health and not constants.ENABLE_CONFD:
......@@ -1242,7 +1241,7 @@ class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
def ExpandNames(self):
self.needed_locks = dict.fromkeys(locking.LEVELS, locking.ALL_SET)
self.share_locks = _ShareAll()
self.share_locks = ShareAll()
def CheckPrereq(self):
"""Check prerequisites.
......@@ -1399,7 +1398,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
}
self.share_locks = _ShareAll()
self.share_locks = ShareAll()
def DeclareLocks(self, level):
if level == locking.LEVEL_NODE:
......@@ -1607,7 +1606,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
_ErrorIf(vgstatus, constants.CV_ENODELVM, node, vgstatus)
# Check PVs
(errmsgs, pvminmax) = _CheckNodePVs(nresult, self._exclusive_storage)
(errmsgs, pvminmax) = CheckNodePVs(nresult, self._exclusive_storage)
for em in errmsgs:
self._Error(constants.CV_ENODELVM, node, em)
if pvminmax is not None:
......@@ -1748,7 +1747,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
cluster = self.cfg.GetClusterInfo()
ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
self.group_info)
err = _ComputeIPolicyInstanceViolation(ipolicy, inst_config, self.cfg)
err = ComputeIPolicyInstanceViolation(ipolicy, inst_config, self.cfg)
_ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance, utils.CommaJoin(err),
code=self.ETYPE_WARNING)
......@@ -2354,7 +2353,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
# _AnnotateDiskParams makes already copies of the disks
devonly = []
for (inst, dev) in disks:
(anno_disk,) = _AnnotateDiskParams(instanceinfo[inst], [dev], self.cfg)
(anno_disk,) = AnnotateDiskParams(instanceinfo[inst], [dev], self.cfg)
self.cfg.SetDiskID(anno_disk, nname)
devonly.append(anno_disk)
......@@ -2505,7 +2504,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
# FIXME: verify OS list
# File verification
filemap = _ComputeAncillaryFiles(cluster, False)
filemap = ComputeAncillaryFiles(cluster, False)
# do local checksums
master_node = self.master_node = self.cfg.GetMasterNode()
......@@ -2580,7 +2579,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
# Gather OOB paths
oob_paths = []
for node in self.all_node_info.values():
path = _SupportsOob(self.cfg, node)
path = SupportsOob(self.cfg, node)
if path and path not in oob_paths:
oob_paths.append(path)
......@@ -2862,7 +2861,7 @@ class LUClusterVerifyDisks(NoHooksLU):
REQ_BGL = False
def ExpandNames(self):
self.share_locks = _ShareAll()
self.share_locks = ShareAll()
self.needed_locks = {
locking.LEVEL_NODEGROUP: locking.ALL_SET,
}
......
......@@ -65,24 +65,24 @@ def _ExpandItemName(fn, name, kind):
return full_name
def _ExpandInstanceName(cfg, name):
def ExpandInstanceName(cfg, name):
"""Wrapper over L{_ExpandItemName} for instance."""
return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
def _ExpandNodeName(cfg, name):
def ExpandNodeName(cfg, name):
"""Wrapper over L{_ExpandItemName} for nodes."""
return _ExpandItemName(cfg.ExpandNodeName, name, "Node")
def _ShareAll():
def ShareAll():
"""Returns a dict declaring all lock levels shared.
"""
return dict.fromkeys(locking.LEVELS, 1)
def _CheckNodeGroupInstances(cfg, group_uuid, owned_instances):
def CheckNodeGroupInstances(cfg, group_uuid, owned_instances):
"""Checks if the instances in a node group are still correct.
@type cfg: L{config.ConfigWriter}
......@@ -106,7 +106,7 @@ def _CheckNodeGroupInstances(cfg, group_uuid, owned_instances):
return wanted_instances
def _GetWantedNodes(lu, nodes):
def GetWantedNodes(lu, nodes):
"""Returns list of checked and expanded node names.
@type lu: L{LogicalUnit}
......@@ -119,12 +119,12 @@ def _GetWantedNodes(lu, nodes):
"""
if nodes:
return [_ExpandNodeName(lu.cfg, name) for name in nodes]
return [ExpandNodeName(lu.cfg, name) for name in nodes]
return utils.NiceSort(lu.cfg.GetNodeList())
def _GetWantedInstances(lu, instances):
def GetWantedInstances(lu, instances):
"""Returns list of checked and expanded instance names.
@type lu: L{LogicalUnit}
......@@ -138,13 +138,13 @@ def _GetWantedInstances(lu, instances):
"""
if instances:
wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
wanted = [ExpandInstanceName(lu.cfg, name) for name in instances]
else:
wanted = utils.NiceSort(lu.cfg.GetInstanceList())
return wanted
def _RunPostHook(lu, node_name):
def RunPostHook(lu, node_name):
"""Runs the post-hook for an opcode on a single node.
"""
......@@ -156,7 +156,7 @@ def _RunPostHook(lu, node_name):
node_name, err)
def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
def RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
"""Distribute additional files which are part of the cluster configuration.
ConfigWriter takes care of distributing the config and ssconf files, but
......@@ -189,7 +189,7 @@ def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
# Gather file lists
(files_all, _, files_mc, files_vm) = \
_ComputeAncillaryFiles(cluster, True)
ComputeAncillaryFiles(cluster, True)
# Never re-distribute configuration file from here
assert not (pathutils.CLUSTER_CONF_FILE in files_all or
......@@ -204,10 +204,10 @@ def _RedistributeAncillaryFiles(lu, additional_nodes=None, additional_vm=True):
# Upload the files
for (node_list, files) in filemap:
for fname in files:
_UploadHelper(lu, node_list, fname)
UploadHelper(lu, node_list, fname)
def _ComputeAncillaryFiles(cluster, redist):
def ComputeAncillaryFiles(cluster, redist):
"""Compute files external to Ganeti which need to be consistent.
@type redist: boolean
......@@ -286,7 +286,7 @@ def _ComputeAncillaryFiles(cluster, redist):
return (files_all, files_opt, files_mc, files_vm)
def _UploadHelper(lu, nodes, fname):
def UploadHelper(lu, nodes, fname):
"""Helper for uploading a file and showing warnings.
"""
......@@ -300,7 +300,7 @@ def _UploadHelper(lu, nodes, fname):
lu.LogWarning(msg)
def _MergeAndVerifyHvState(op_input, obj_input):
def MergeAndVerifyHvState(op_input, obj_input):
"""Combines the hv state from an opcode with the one of the object
@param op_input: The input dict from the opcode
......@@ -322,7 +322,7 @@ def _MergeAndVerifyHvState(op_input, obj_input):
return None
def _MergeAndVerifyDiskState(op_input, obj_input):
def MergeAndVerifyDiskState(op_input, obj_input):
"""Combines the disk state from an opcode with the one of the object
@param op_input: The input dict from the opcode
......@@ -345,7 +345,7 @@ def _MergeAndVerifyDiskState(op_input, obj_input):
return None
def _CheckOSParams(lu, required, nodenames, osname, osparams):
def CheckOSParams(lu, required, nodenames, osname, osparams):
"""OS parameters validation.
@type lu: L{LogicalUnit}
......@@ -375,7 +375,7 @@ def _CheckOSParams(lu, required, nodenames, osname, osparams):
osname, node)
def _CheckHVParams(lu, nodenames, hvname, hvparams):
def CheckHVParams(lu, nodenames, hvname, hvparams):
"""Hypervisor parameter validation.
This function abstract the hypervisor parameter validation to be
......@@ -405,7 +405,7 @@ def _CheckHVParams(lu, nodenames, hvname, hvparams):
info.Raise("Hypervisor parameter validation failed on node %s" % node)
def _AdjustCandidatePool(lu, exceptions):
def AdjustCandidatePool(lu, exceptions):
"""Adjust the candidate pool after node operations.
"""
......@@ -421,7 +421,7 @@ def _AdjustCandidatePool(lu, exceptions):
(mc_now, mc_max))
def _CheckNodePVs(nresult, exclusive_storage):
def CheckNodePVs(nresult, exclusive_storage):
"""Check node PVs.
"""
......@@ -475,10 +475,10 @@ def _ComputeMinMaxSpec(name, qualifier, ispecs, value):
return None
def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
nic_count, disk_sizes, spindle_use,
disk_template,
_compute_fn=_ComputeMinMaxSpec):
def ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
nic_count, disk_sizes, spindle_use,
disk_template,
_compute_fn=_ComputeMinMaxSpec):
"""Verifies ipolicy against provided specs.
@type ipolicy: dict
......@@ -530,8 +530,8 @@ def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
return ret + min_errs
def _ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
_compute_fn=_ComputeIPolicySpecViolation):
def ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
_compute_fn=ComputeIPolicySpecViolation):
"""Compute if instance meets the specs of ipolicy.
@type ipolicy: dict
......@@ -541,7 +541,7 @@ def _ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
@type cfg: L{config.ConfigWriter}
@param cfg: Cluster configuration
@param _compute_fn: The function to verify ipolicy (unittest only)
@see: L{_ComputeIPolicySpecViolation}
@see: L{ComputeIPolicySpecViolation}
"""
be_full = cfg.GetClusterInfo().FillBE(instance)
......@@ -569,10 +569,10 @@ def _ComputeViolatingInstances(ipolicy, instances, cfg):
"""
return frozenset([inst.name for inst in instances
if _ComputeIPolicyInstanceViolation(ipolicy, inst, cfg)])
if ComputeIPolicyInstanceViolation(ipolicy, inst, cfg)])
def _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances, cfg):
def ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances, cfg):
"""Computes a set of any instances that would violate the new ipolicy.
@param old_ipolicy: The current (still in-place) ipolicy
......@@ -588,7 +588,7 @@ def _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances, cfg):
_ComputeViolatingInstances(old_ipolicy, instances, cfg))
def _GetUpdatedParams(old_params, update_dict,
def GetUpdatedParams(old_params, update_dict,
use_default=True, use_none=False):
"""Return the new version of a parameter dictionary.