diff --git a/lib/cmdlib.py b/lib/cmdlib.py index 13eddd000f532dc8fb7f7b96993f9fb5b8fedb4a..31d3d765395919427385102a176aa7279be37352 100644 --- a/lib/cmdlib.py +++ b/lib/cmdlib.py @@ -827,10 +827,10 @@ def _GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False): raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key, errors.ECODE_INVAL) if key in constants.IPOLICY_ISPECS: - utils.ForceDictType(value, constants.ISPECS_PARAMETER_TYPES) ipolicy[key] = _GetUpdatedParams(old_ipolicy.get(key, {}), value, use_none=use_none, use_default=use_default) + utils.ForceDictType(ipolicy[key], constants.ISPECS_PARAMETER_TYPES) else: if (not value or value == [constants.VALUE_DEFAULT] or value == constants.VALUE_DEFAULT): @@ -1231,6 +1231,7 @@ def _ComputeMinMaxSpec(name, qualifier, ipolicy, value): def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count, nic_count, disk_sizes, spindle_use, + disk_template, _compute_fn=_ComputeMinMaxSpec): """Verifies ipolicy against provided specs. @@ -1248,6 +1249,8 @@ def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count, @param disk_sizes: Disk sizes of used disk (len must match C{disk_count}) @type spindle_use: int @param spindle_use: The number of spindles this instance uses + @type disk_template: string + @param disk_template: The disk template of the instance @param _compute_fn: The compute function (unittest only) @return: A list of violations, or an empty list of no violations are found @@ -1257,18 +1260,25 @@ def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count, test_settings = [ (constants.ISPEC_MEM_SIZE, "", mem_size), (constants.ISPEC_CPU_COUNT, "", cpu_count), - (constants.ISPEC_DISK_COUNT, "", disk_count), (constants.ISPEC_NIC_COUNT, "", nic_count), (constants.ISPEC_SPINDLE_USE, "", spindle_use), ] + [(constants.ISPEC_DISK_SIZE, str(idx), d) for idx, d in enumerate(disk_sizes)] + if disk_template != constants.DT_DISKLESS: + # This check doesn't make sense for diskless instances + test_settings.append((constants.ISPEC_DISK_COUNT, "", disk_count)) + ret = [] + allowed_dts = ipolicy[constants.IPOLICY_DTS] + if disk_template not in allowed_dts: + ret.append("Disk template %s is not allowed (allowed templates: %s)" % + (disk_template, utils.CommaJoin(allowed_dts))) - return filter(None, - (_compute_fn(name, qualifier, ipolicy, value) - for (name, qualifier, value) in test_settings)) + return ret + filter(None, + (_compute_fn(name, qualifier, ipolicy, value) + for (name, qualifier, value) in test_settings)) -def _ComputeIPolicyInstanceViolation(ipolicy, instance, +def _ComputeIPolicyInstanceViolation(ipolicy, instance, cfg, _compute_fn=_ComputeIPolicySpecViolation): """Compute if instance meets the specs of ipolicy. 
@@ -1276,29 +1286,36 @@ def _ComputeIPolicyInstanceViolation(ipolicy, instance, @param ipolicy: The ipolicy to verify against @type instance: L{objects.Instance} @param instance: The instance to verify + @type cfg: L{config.ConfigWriter} + @param cfg: Cluster configuration @param _compute_fn: The function to verify ipolicy (unittest only) @see: L{_ComputeIPolicySpecViolation} """ - mem_size = instance.beparams.get(constants.BE_MAXMEM, None) - cpu_count = instance.beparams.get(constants.BE_VCPUS, None) - spindle_use = instance.beparams.get(constants.BE_SPINDLE_USE, None) + be_full = cfg.GetClusterInfo().FillBE(instance) + mem_size = be_full[constants.BE_MAXMEM] + cpu_count = be_full[constants.BE_VCPUS] + spindle_use = be_full[constants.BE_SPINDLE_USE] disk_count = len(instance.disks) disk_sizes = [disk.size for disk in instance.disks] nic_count = len(instance.nics) + disk_template = instance.disk_template return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count, - disk_sizes, spindle_use) + disk_sizes, spindle_use, disk_template) def _ComputeIPolicyInstanceSpecViolation( - ipolicy, instance_spec, _compute_fn=_ComputeIPolicySpecViolation): + ipolicy, instance_spec, disk_template, + _compute_fn=_ComputeIPolicySpecViolation): """Compute if instance specs meets the specs of ipolicy. @type ipolicy: dict @param ipolicy: The ipolicy to verify against @param instance_spec: dict @param instance_spec: The instance spec to verify + @type disk_template: string + @param disk_template: the disk template of the instance @param _compute_fn: The function to verify ipolicy (unittest only) @see: L{_ComputeIPolicySpecViolation} @@ -1311,11 +1328,11 @@ def _ComputeIPolicyInstanceSpecViolation( spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None) return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count, - disk_sizes, spindle_use) + disk_sizes, spindle_use, disk_template) def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group, - target_group, + target_group, cfg, _compute_fn=_ComputeIPolicyInstanceViolation): """Compute if instance meets the specs of the new target group. @@ -1323,6 +1340,8 @@ def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group, @param instance: The instance object to verify @param current_group: The current group of the instance @param target_group: The new group of the instance + @type cfg: L{config.ConfigWriter} + @param cfg: Cluster configuration @param _compute_fn: The function to verify ipolicy (unittest only) @see: L{_ComputeIPolicySpecViolation} @@ -1330,23 +1349,25 @@ def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group, if current_group == target_group: return [] else: - return _compute_fn(ipolicy, instance) + return _compute_fn(ipolicy, instance, cfg) -def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, ignore=False, +def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, cfg, ignore=False, _compute_fn=_ComputeIPolicyNodeViolation): """Checks that the target node is correct in terms of instance policy. 
@param ipolicy: The ipolicy to verify @param instance: The instance object to verify @param node: The new node to relocate + @type cfg: L{config.ConfigWriter} + @param cfg: Cluster configuration @param ignore: Ignore violations of the ipolicy @param _compute_fn: The function to verify ipolicy (unittest only) @see: L{_ComputeIPolicySpecViolation} """ primary_node = lu.cfg.GetNodeInfo(instance.primary_node) - res = _compute_fn(ipolicy, instance, primary_node.group, node.group) + res = _compute_fn(ipolicy, instance, primary_node.group, node.group, cfg) if res: msg = ("Instance does not meet target node group's (%s) instance" @@ -1357,18 +1378,20 @@ def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, ignore=False, raise errors.OpPrereqError(msg, errors.ECODE_INVAL) -def _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances): +def _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances, cfg): """Computes a set of any instances that would violate the new ipolicy. @param old_ipolicy: The current (still in-place) ipolicy @param new_ipolicy: The new (to become) ipolicy @param instances: List of instances to verify + @type cfg: L{config.ConfigWriter} + @param cfg: Cluster configuration @return: A list of instances which violates the new ipolicy but did not before """ - return (_ComputeViolatingInstances(new_ipolicy, instances) - - _ComputeViolatingInstances(old_ipolicy, instances)) + return (_ComputeViolatingInstances(new_ipolicy, instances, cfg) - + _ComputeViolatingInstances(old_ipolicy, instances, cfg)) def _ExpandItemName(fn, name, kind): @@ -1647,17 +1670,19 @@ def _DecideSelfPromotion(lu, exceptions=None): return mc_now < mc_should -def _ComputeViolatingInstances(ipolicy, instances): +def _ComputeViolatingInstances(ipolicy, instances, cfg): """Computes a set of instances who violates given ipolicy. 
@param ipolicy: The ipolicy to verify - @type instances: object.Instance + @type instances: L{objects.Instance} @param instances: List of instances to verify + @type cfg: L{config.ConfigWriter} + @param cfg: Cluster configuration @return: A frozenset of instance names violating the ipolicy """ return frozenset([inst.name for inst in instances - if _ComputeIPolicyInstanceViolation(ipolicy, inst)]) + if _ComputeIPolicyInstanceViolation(ipolicy, inst, cfg)]) def _CheckNicsBridgesExist(lu, target_nics, target_node): @@ -2626,7 +2651,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors): cluster = self.cfg.GetClusterInfo() ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, self.group_info) - err = _ComputeIPolicyInstanceViolation(ipolicy, inst_config) + err = _ComputeIPolicyInstanceViolation(ipolicy, inst_config, self.cfg) _ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance, utils.CommaJoin(err), code=self.ETYPE_WARNING) @@ -4266,7 +4291,7 @@ class LUClusterSetParams(LogicalUnit): new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy) ipol = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group) new = _ComputeNewInstanceViolations(ipol, - new_ipolicy, instances) + new_ipolicy, instances, self.cfg) if new: violations.update(new) @@ -8389,7 +8414,7 @@ class LUInstanceMove(LogicalUnit): cluster = self.cfg.GetClusterInfo() group_info = self.cfg.GetNodeGroup(node.group) ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info) - _CheckTargetNodeIPolicy(self, ipolicy, instance, node, + _CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg, ignore=self.op.ignore_ipolicy) if instance.admin_state == constants.ADMINST_UP: @@ -8664,7 +8689,7 @@ class TLMigrateInstance(Tasklet): group_info = self.cfg.GetNodeGroup(nodeinfo.group) ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info) - _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, + _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, self.cfg, ignore=self.ignore_ipolicy) # self.target_node is already populated, either directly or by the @@ -8708,7 +8733,7 @@ class TLMigrateInstance(Tasklet): group_info = self.cfg.GetNodeGroup(nodeinfo.group) ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info) - _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, + _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, self.cfg, ignore=self.ignore_ipolicy) i_be = cluster.FillBE(instance) @@ -10769,25 +10794,6 @@ class LUInstanceCreate(LogicalUnit): nodenames = [pnode.name] + self.secondaries - # Verify instance specs - spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None) - ispec = { - constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None), - constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None), - constants.ISPEC_DISK_COUNT: len(self.disks), - constants.ISPEC_DISK_SIZE: [disk["size"] for disk in self.disks], - constants.ISPEC_NIC_COUNT: len(self.nics), - constants.ISPEC_SPINDLE_USE: spindle_use, - } - - group_info = self.cfg.GetNodeGroup(pnode.group) - ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info) - res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec) - if not self.op.ignore_ipolicy and res: - msg = ("Instance allocation to group %s (%s) violates policy: %s" % - (pnode.group, group_info.name, utils.CommaJoin(res))) - raise errors.OpPrereqError(msg, errors.ECODE_INVAL) - if not self.adopt_disks: if self.op.disk_template == constants.DT_RBD: # 
_CheckRADOSFreeSpace() is just a placeholder. @@ -10886,12 +10892,12 @@ class LUInstanceCreate(LogicalUnit): group_info = self.cfg.GetNodeGroup(pnode.group) ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info) - res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec) + res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec, + self.op.disk_template) if not self.op.ignore_ipolicy and res: - raise errors.OpPrereqError(("Instance allocation to group %s violates" - " policy: %s") % (pnode.group, - utils.CommaJoin(res)), - errors.ECODE_INVAL) + msg = ("Instance allocation to group %s (%s) violates policy: %s" % + (pnode.group, group_info.name, utils.CommaJoin(res))) + raise errors.OpPrereqError(msg, errors.ECODE_INVAL) _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams) @@ -11738,7 +11744,7 @@ class TLReplaceDisks(Tasklet): ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, new_group_info) _CheckTargetNodeIPolicy(self, ipolicy, instance, self.remote_node_info, - ignore=self.ignore_ipolicy) + self.cfg, ignore=self.ignore_ipolicy) for node in check_nodes: _CheckNodeOnline(self.lu, node) @@ -13586,7 +13592,7 @@ class LUInstanceSetParams(LogicalUnit): snode_group = self.cfg.GetNodeGroup(snode_info.group) ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, snode_group) - _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, + _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg, ignore=self.op.ignore_ipolicy) if pnode_info.group != snode_info.group: self.LogWarning("The primary and secondary nodes are in two" @@ -13913,14 +13919,20 @@ class LUInstanceSetParams(LogicalUnit): None) # Copy ispec to verify parameters with min/max values separately + if self.op.disk_template: + new_disk_template = self.op.disk_template + else: + new_disk_template = instance.disk_template ispec_max = ispec.copy() ispec_max[constants.ISPEC_MEM_SIZE] = \ self.be_new.get(constants.BE_MAXMEM, None) - res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max) + res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max, + new_disk_template) ispec_min = ispec.copy() ispec_min[constants.ISPEC_MEM_SIZE] = \ self.be_new.get(constants.BE_MINMEM, None) - res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min) + res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min, + new_disk_template) if (res_max or res_min): # FIXME: Improve error message by including information about whether @@ -15357,7 +15369,7 @@ class LUGroupSetParams(LogicalUnit): violations = \ _ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster, self.group), - new_ipolicy, instances) + new_ipolicy, instances, self.cfg) if violations: self.LogWarning("After the ipolicy change the following instances" diff --git a/lib/constants.py b/lib/constants.py index 31bd688c74d853d86e8a287b0488753369f01756..5dc54b1205cb5ecc357e560d5d9915d992ef8e0c 100644 --- a/lib/constants.py +++ b/lib/constants.py @@ -2125,7 +2125,7 @@ IPOLICY_DEFAULTS = { ISPEC_NIC_COUNT: 1, ISPEC_SPINDLE_USE: 1, }, - IPOLICY_DTS: DISK_TEMPLATES, + IPOLICY_DTS: list(DISK_TEMPLATES), IPOLICY_VCPU_RATIO: 4.0, IPOLICY_SPINDLE_RATIO: 32.0, } diff --git a/lib/objects.py b/lib/objects.py index e2f9a1a0bba479fb77b5de1864081c6bba7144b5..51e13f883eebb5fb39d53493e17b23d2f2626330 100644 --- a/lib/objects.py +++ b/lib/objects.py @@ -942,6 +942,9 @@ class InstancePolicy(ConfigObject): """Checks the disk templates for validity. 
""" + if not disk_templates: + raise errors.ConfigurationError("Instance policy must contain" + + " at least one disk template") wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES) if wrong: raise errors.ConfigurationError("Invalid disk template(s) %s" % @@ -1578,6 +1581,12 @@ class Cluster(TaggableObject): # we can either make sure to upgrade the ipolicy always, or only # do it in some corner cases (e.g. missing keys); note that this # will break any removal of keys from the ipolicy dict + wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS + if wrongkeys: + # These keys would be silently removed by FillIPolicy() + msg = ("Cluster instance policy contains spourious keys: %s" % + utils.CommaJoin(wrongkeys)) + raise errors.ConfigurationError(msg) self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy) @property diff --git a/lib/rapi/client.py b/lib/rapi/client.py index d02faffe530f4e1a10adc876434c75b2ff508351..e48de24b9e0b0f2b439fc99e9910f5ae06855861 100644 --- a/lib/rapi/client.py +++ b/lib/rapi/client.py @@ -1048,7 +1048,7 @@ class GanetiRapiClient(object): # pylint: disable=R0904 body = kwargs _AppendDryRunIf(query, dry_run) - _AppendIf(query, no_remember, ("no-remember", 1)) + _AppendIf(query, no_remember, ("no_remember", 1)) return self._SendRequest(HTTP_PUT, ("/%s/instances/%s/shutdown" % @@ -1069,7 +1069,7 @@ class GanetiRapiClient(object): # pylint: disable=R0904 """ query = [] _AppendDryRunIf(query, dry_run) - _AppendIf(query, no_remember, ("no-remember", 1)) + _AppendIf(query, no_remember, ("no_remember", 1)) return self._SendRequest(HTTP_PUT, ("/%s/instances/%s/startup" % diff --git a/qa/ganeti-qa.py b/qa/ganeti-qa.py index 95c5c5657f7e53305c361f67f20f070731681deb..567732c07f8893d6a3dd7b9a1399ce29b69bde0e 100755 --- a/qa/ganeti-qa.py +++ b/qa/ganeti-qa.py @@ -173,6 +173,8 @@ def RunClusterTests(): ("cluster-reserved-lvs", qa_cluster.TestClusterReservedLvs), # TODO: add more cluster modify tests ("cluster-modify", qa_cluster.TestClusterModifyEmpty), + ("cluster-modify", qa_cluster.TestClusterModifyIPolicy), + ("cluster-modify", qa_cluster.TestClusterModifyISpecs), ("cluster-modify", qa_cluster.TestClusterModifyBe), ("cluster-modify", qa_cluster.TestClusterModifyDisk), ("cluster-rename", qa_cluster.TestClusterRename), @@ -510,6 +512,61 @@ def RunExclusiveStorageTests(): node.Release() +def _BuildSpecDict(par, mn, st, mx): + return {par: {"min": mn, "std": st, "max": mx}} + + +def TestIPolicyPlainInstance(): + """Test instance policy interaction with instances""" + params = ["mem-size", "cpu-count", "disk-count", "disk-size", "nic-count"] + if not qa_config.IsTemplateSupported(constants.DT_PLAIN): + print "Template %s not supported" % constants.DT_PLAIN + return + + # This test assumes that the group policy is empty + (_, old_specs) = qa_cluster.TestClusterSetISpecs({}) + node = qa_config.AcquireNode() + try: + # Log of policy changes, list of tuples: (change, policy_violated) + history = [] + instance = qa_instance.TestInstanceAddWithPlainDisk([node]) + try: + policyerror = [constants.CV_EINSTANCEPOLICY] + for par in params: + qa_cluster.AssertClusterVerify() + (iminval, imaxval) = qa_instance.GetInstanceSpec(instance.name, par) + # Some specs must be multiple of 4 + new_spec = _BuildSpecDict(par, imaxval + 4, imaxval + 4, imaxval + 4) + history.append((new_spec, True)) + qa_cluster.TestClusterSetISpecs(new_spec) + qa_cluster.AssertClusterVerify(warnings=policyerror) + if iminval > 0: + # Some specs must be multiple of 4 + if 
iminval >= 4: + upper = iminval - 4 + else: + upper = iminval - 1 + new_spec = _BuildSpecDict(par, 0, upper, upper) + history.append((new_spec, True)) + qa_cluster.TestClusterSetISpecs(new_spec) + qa_cluster.AssertClusterVerify(warnings=policyerror) + qa_cluster.TestClusterSetISpecs(old_specs) + history.append((old_specs, False)) + qa_instance.TestInstanceRemove(instance) + finally: + instance.Release() + + # Now we replay the same policy changes, and we expect that the instance + # cannot be created for the cases where we had a policy violation above + for (change, failed) in history: + qa_cluster.TestClusterSetISpecs(change) + if failed: + qa_instance.TestInstanceAddWithPlainDisk([node], fail=True) + # Instance creation with no policy violation has been tested already + finally: + node.Release() + + def RunInstanceTests(): """Create and exercise instances.""" instance_tests = [ @@ -650,6 +707,8 @@ def RunQa(): pnode.Release() RunExclusiveStorageTests() + RunTestIf(["cluster-instance-policy", "instance-add-plain-disk"], + TestIPolicyPlainInstance) # Test removing instance with offline drbd secondary if qa_config.TestEnabled(["instance-remove-drbd-offline", diff --git a/qa/qa-sample.json b/qa/qa-sample.json index 2eda031bfaad3f3b2863b242fd799302d8dafb82..d5e5c869753c4e2b1321981228c342eae72f387a 100644 --- a/qa/qa-sample.json +++ b/qa/qa-sample.json @@ -144,6 +144,7 @@ "cluster-redist-conf": true, "cluster-repair-disk-sizes": true, "cluster-exclusive-storage": true, + "cluster-instance-policy": true, "haskell-confd": true, "htools": true, diff --git a/qa/qa_cluster.py b/qa/qa_cluster.py index e0559050e51bb9274c03a2cd8c7629c73f195600..77d32268edb343bbe33397084f6ddc48be4b0295 100644 --- a/qa/qa_cluster.py +++ b/qa/qa_cluster.py @@ -96,20 +96,33 @@ def _GetBoolClusterField(field): # Cluster-verify errors (date, "ERROR", then error code) -_CVERROR_RE = re.compile(r"^[\w\s:]+\s+- ERROR:([A-Z0-9_-]+):") +_CVERROR_RE = re.compile(r"^[\w\s:]+\s+- (ERROR|WARNING):([A-Z0-9_-]+):") def _GetCVErrorCodes(cvout): - ret = set() + errs = set() + warns = set() for l in cvout.splitlines(): m = _CVERROR_RE.match(l) if m: - ecode = m.group(1) - ret.add(ecode) - return ret + etype = m.group(1) + ecode = m.group(2) + if etype == "ERROR": + errs.add(ecode) + elif etype == "WARNING": + warns.add(ecode) + return (errs, warns) -def AssertClusterVerify(fail=False, errors=None): +def _CheckVerifyErrors(actual, expected, etype): + exp_codes = compat.UniqueFrozenset(e for (_, e, _) in expected) + if not actual.issuperset(exp_codes): + missing = exp_codes.difference(actual) + raise qa_error.Error("Cluster-verify didn't return these expected" + " %ss: %s" % (etype, utils.CommaJoin(missing))) + + +def AssertClusterVerify(fail=False, errors=None, warnings=None): """Run cluster-verify and check the result @type fail: bool @@ -118,19 +131,20 @@ def AssertClusterVerify(fail=False, errors=None): @param errors: List of CV_XXX errors that are expected; if specified, all the errors listed must appear in cluster-verify output. A non-empty value implies C{fail=True}. + @type warnings: list of tuples + @param warnings: Same as C{errors} but for warnings. 
""" cvcmd = "gnt-cluster verify" mnode = qa_config.GetMasterNode() - if errors: + if errors or warnings: cvout = GetCommandOutput(mnode.primary, cvcmd + " --error-codes", - fail=True) - actual = _GetCVErrorCodes(cvout) - expected = compat.UniqueFrozenset(e for (_, e, _) in errors) - if not actual.issuperset(expected): - missing = expected.difference(actual) - raise qa_error.Error("Cluster-verify didn't return these expected" - " errors: %s" % utils.CommaJoin(missing)) + fail=(fail or errors)) + (act_errs, act_warns) = _GetCVErrorCodes(cvout) + if errors: + _CheckVerifyErrors(act_errs, errors, "error") + if warnings: + _CheckVerifyErrors(act_warns, warnings, "warning") else: AssertCommand(cvcmd, fail=fail, node=mnode) @@ -424,6 +438,199 @@ def TestClusterModifyBe(): AssertCommand(["gnt-cluster", "modify", "-B", bep]) +_START_IPOLICY_RE = re.compile(r"^(\s*)Instance policy") +_START_ISPEC_RE = re.compile(r"^\s+-\s+(std|min|max)") +_VALUE_RE = r"([^\s:][^:]*):\s+(\S.*)$" +_IPOLICY_PARAM_RE = re.compile(r"^\s+-\s+" + _VALUE_RE) +_ISPEC_VALUE_RE = re.compile(r"^\s+" + _VALUE_RE) + + +def _GetClusterIPolicy(): + """Return the run-time values of the cluster-level instance policy. + + @rtype: tuple + @return: (policy, specs), where: + - policy is a dictionary of the policy values, instance specs excluded + - specs is dict of dict, specs[par][key] is a spec value, where key is + "min", "max", or "std" + + """ + mnode = qa_config.GetMasterNode() + info = GetCommandOutput(mnode.primary, "gnt-cluster info") + inside_policy = False + end_ispec_re = None + curr_spec = "" + specs = {} + policy = {} + for line in info.splitlines(): + if inside_policy: + # The order of the matching is important, as some REs overlap + m = _START_ISPEC_RE.match(line) + if m: + curr_spec = m.group(1) + continue + m = _IPOLICY_PARAM_RE.match(line) + if m: + policy[m.group(1)] = m.group(2).strip() + continue + m = _ISPEC_VALUE_RE.match(line) + if m: + assert curr_spec + par = m.group(1) + if par == "memory-size": + par = "mem-size" + d = specs.setdefault(par, {}) + d[curr_spec] = m.group(2).strip() + continue + assert end_ispec_re is not None + if end_ispec_re.match(line): + inside_policy = False + else: + m = _START_IPOLICY_RE.match(line) + if m: + inside_policy = True + # We stop parsing when we find the same indentation level + re_str = r"^\s{%s}\S" % len(m.group(1)) + end_ispec_re = re.compile(re_str) + # Sanity checks + assert len(specs) > 0 + good = ("min" in d and "std" in d and "max" in d for d in specs) + assert good, "Missing item in specs: %s" % specs + assert len(policy) > 0 + return (policy, specs) + + +def TestClusterModifyIPolicy(): + """gnt-cluster modify --ipolicy-*""" + basecmd = ["gnt-cluster", "modify"] + (old_policy, old_specs) = _GetClusterIPolicy() + for par in ["vcpu-ratio", "spindle-ratio"]: + curr_val = float(old_policy[par]) + test_values = [ + (True, 1.0), + (True, 1.5), + (True, 2), + (False, "a"), + # Restore the old value + (True, curr_val), + ] + for (good, val) in test_values: + cmd = basecmd + ["--ipolicy-%s=%s" % (par, val)] + AssertCommand(cmd, fail=not good) + if good: + curr_val = val + # Check the affected parameter + (eff_policy, eff_specs) = _GetClusterIPolicy() + AssertEqual(float(eff_policy[par]), curr_val) + # Check everything else + AssertEqual(eff_specs, old_specs) + for p in eff_policy.keys(): + if p == par: + continue + AssertEqual(eff_policy[p], old_policy[p]) + + # Disk templates are treated slightly differently + par = "disk-templates" + disp_str = "enabled disk templates" + 
curr_val = old_policy[disp_str] + test_values = [ + (True, constants.DT_PLAIN), + (True, "%s,%s" % (constants.DT_PLAIN, constants.DT_DRBD8)), + (False, "thisisnotadisktemplate"), + (False, ""), + # Restore the old value + (True, curr_val.replace(" ", "")), + ] + for (good, val) in test_values: + cmd = basecmd + ["--ipolicy-%s=%s" % (par, val)] + AssertCommand(cmd, fail=not good) + if good: + curr_val = val + # Check the affected parameter + (eff_policy, eff_specs) = _GetClusterIPolicy() + AssertEqual(eff_policy[disp_str].replace(" ", ""), curr_val) + # Check everything else + AssertEqual(eff_specs, old_specs) + for p in eff_policy.keys(): + if p == disp_str: + continue + AssertEqual(eff_policy[p], old_policy[p]) + + +def TestClusterSetISpecs(new_specs, fail=False, old_values=None): + """Change instance specs. + + @type new_specs: dict of dict + @param new_specs: new_specs[par][key], where key is "min", "max", "std". It + can be an empty dictionary. + @type fail: bool + @param fail: if the change is expected to fail + @type old_values: tuple + @param old_values: (old_policy, old_specs), as returned by + L{_GetClusterIPolicy} + @return: same as L{_GetClusterIPolicy} + + """ + if old_values: + (old_policy, old_specs) = old_values + else: + (old_policy, old_specs) = _GetClusterIPolicy() + if new_specs: + cmd = ["gnt-cluster", "modify"] + for (par, keyvals) in new_specs.items(): + if par == "spindle-use": + # ignore spindle-use, which is not settable + continue + cmd += [ + "--specs-%s" % par, + ",".join(["%s=%s" % (k, v) for (k, v) in keyvals.items()]), + ] + AssertCommand(cmd, fail=fail) + # Check the new state + (eff_policy, eff_specs) = _GetClusterIPolicy() + AssertEqual(eff_policy, old_policy) + if fail: + AssertEqual(eff_specs, old_specs) + else: + for par in eff_specs: + for key in eff_specs[par]: + if par in new_specs and key in new_specs[par]: + AssertEqual(int(eff_specs[par][key]), int(new_specs[par][key])) + else: + AssertEqual(int(eff_specs[par][key]), int(old_specs[par][key])) + return (eff_policy, eff_specs) + + +def TestClusterModifyISpecs(): + """gnt-cluster modify --specs-*""" + params = ["mem-size", "disk-size", "disk-count", "cpu-count", "nic-count"] + (cur_policy, cur_specs) = _GetClusterIPolicy() + for par in params: + test_values = [ + (True, 0, 4, 12), + (True, 4, 4, 12), + (True, 4, 12, 12), + (True, 4, 4, 4), + (False, 4, 0, 12), + (False, 4, 16, 12), + (False, 4, 4, 0), + (False, 12, 4, 4), + (False, 12, 4, 0), + (False, "a", 4, 12), + (False, 0, "a", 12), + (False, 0, 4, "a"), + # This is to restore the old values + (True, + cur_specs[par]["min"], cur_specs[par]["std"], cur_specs[par]["max"]) + ] + for (good, mn, st, mx) in test_values: + new_vals = {par: {"min": str(mn), "std": str(st), "max": str(mx)}} + cur_state = (cur_policy, cur_specs) + # We update cur_specs, as we've copied the values to restore already + (cur_policy, cur_specs) = TestClusterSetISpecs(new_vals, fail=not good, + old_values=cur_state) + + def TestClusterInfo(): """gnt-cluster info""" AssertCommand(["gnt-cluster", "info"]) diff --git a/qa/qa_group.py b/qa/qa_group.py index 22f9f72dbefcd223d8b2c3e8a1127c01195681aa..e48d37f4345f905819bd2a713cf2061e3c4b1eee 100644 --- a/qa/qa_group.py +++ b/qa/qa_group.py @@ -98,6 +98,12 @@ def TestGroupModify(): "min=%s,max=%s,std=0" % (min_v, max_v), group1], fail=True) AssertCommand(["gnt-group", "modify", "--specs-mem-size", "min=%s,max=%s" % (min_v, max_v), group1]) + AssertCommand(["gnt-group", "modify", "--specs-mem-size", + "min=default,max=default", 
group1]) + AssertCommand(["gnt-group", "modify", "--ipolicy-vcpu-ratio", + "3.5", group1]) + AssertCommand(["gnt-group", "modify", "--ipolicy-vcpu-ratio", + "default", group1]) AssertCommand(["gnt-group", "modify", "--node-parameters", "spindle_count=10", group1]) if qa_config.TestEnabled("htools"): diff --git a/qa/qa_instance.py b/qa/qa_instance.py index 5d5d020a233889a1fd263b29acd9e576b1fa7ce9..e2a8e5c101ae12f4a6827ee2d574dee9febc261f 100644 --- a/qa/qa_instance.py +++ b/qa/qa_instance.py @@ -66,7 +66,7 @@ def _GetGenericAddParameters(inst, disk_template, force_mac=None): return params -def _DiskTest(node, disk_template): +def _DiskTest(node, disk_template, fail=False): instance = qa_config.AcquireInstance() try: cmd = (["gnt-instance", "add", @@ -76,16 +76,22 @@ def _DiskTest(node, disk_template): _GetGenericAddParameters(instance, disk_template)) cmd.append(instance.name) - AssertCommand(cmd) + AssertCommand(cmd, fail=fail) - _CheckSsconfInstanceList(instance.name) - instance.SetDiskTemplate(disk_template) + if not fail: + _CheckSsconfInstanceList(instance.name) + instance.SetDiskTemplate(disk_template) - return instance + return instance except: instance.Release() raise + # Handle the case where creation is expected to fail + assert fail + instance.Release() + return None + def _GetInstanceInfo(instance): """Return information about the actual state of an instance. @@ -162,19 +168,33 @@ def _DestroyInstanceVolumes(instance): AssertCommand(["lvremove", "-f"] + vols, node=node) -def _GetBoolInstanceField(instance, field): - """Get the Boolean value of a field of an instance. +def _GetInstanceField(instance, field): + """Get the value of a field of an instance. @type instance: string @param instance: Instance name @type field: string @param field: Name of the field + @rtype: string """ master = qa_config.GetMasterNode() infocmd = utils.ShellQuoteArgs(["gnt-instance", "list", "--no-headers", - "-o", field, instance]) - info_out = qa_utils.GetCommandOutput(master.primary, infocmd).strip() + "--units", "m", "-o", field, instance]) + return qa_utils.GetCommandOutput(master.primary, infocmd).strip() + + +def _GetBoolInstanceField(instance, field): + """Get the Boolean value of a field of an instance. + + @type instance: string + @param instance: Instance name + @type field: string + @param field: Name of the field + @rtype: bool + + """ + info_out = _GetInstanceField(instance, field) if info_out == "Y": return True elif info_out == "N": @@ -184,6 +204,59 @@ def _GetBoolInstanceField(instance, field): " %s" % (field, instance, info_out)) +def _GetNumInstanceField(instance, field): + """Get a numeric value of a field of an instance. + + @type instance: string + @param instance: Instance name + @type field: string + @param field: Name of the field + @rtype: int or float + + """ + info_out = _GetInstanceField(instance, field) + try: + ret = int(info_out) + except ValueError: + try: + ret = float(info_out) + except ValueError: + raise qa_error.Error("Field %s of instance %s has a non-numeric value:" + " %s" % (field, instance, info_out)) + return ret + + +def GetInstanceSpec(instance, spec): + """Return the current spec for the given parameter. 
+ + @type instance: string + @param instance: Instance name + @type spec: string + @param spec: one of the supported parameters: "mem-size", "cpu-count", + "disk-count", "disk-size", "nic-count" + @rtype: tuple + @return: (minspec, maxspec); minspec and maxspec can be different only for + memory and disk size + + """ + specmap = { + "mem-size": ["be/minmem", "be/maxmem"], + "cpu-count": ["vcpus"], + "disk-count": ["disk.count"], + "disk-size": ["disk.size/ "], + "nic-count": ["nic.count"], + } + # For disks, first we need the number of disks + if spec == "disk-size": + (numdisk, _) = GetInstanceSpec(instance, "disk-count") + fields = ["disk.size/%s" % k for k in range(0, numdisk)] + else: + assert spec in specmap, "%s not in %s" % (spec, specmap) + fields = specmap[spec] + values = [_GetNumInstanceField(instance, f) for f in fields] + return (min(values), max(values)) + + def IsFailoverSupported(instance): return instance.disk_template in constants.DTS_MIRRORED @@ -196,11 +269,13 @@ def IsDiskReplacingSupported(instance): return instance.disk_template == constants.DT_DRBD8 -@InstanceCheck(None, INST_UP, RETURN_VALUE) -def TestInstanceAddWithPlainDisk(nodes): +def TestInstanceAddWithPlainDisk(nodes, fail=False): """gnt-instance add -t plain""" assert len(nodes) == 1 - return _DiskTest(nodes[0].primary, constants.DT_PLAIN) + instance = _DiskTest(nodes[0].primary, constants.DT_PLAIN, fail=fail) + if not fail: + qa_utils.RunInstanceCheck(instance, True) + return instance @InstanceCheck(None, INST_UP, RETURN_VALUE) diff --git a/test/py/ganeti.cmdlib_unittest.py b/test/py/ganeti.cmdlib_unittest.py index e43b1fb77a56435fd04c659c64fca4012de699c1..85f1969a99e514b133e9c764d810a6b4d262fe68 100755 --- a/test/py/ganeti.cmdlib_unittest.py +++ b/test/py/ganeti.cmdlib_unittest.py @@ -654,6 +654,13 @@ def _ValidateComputeMinMaxSpec(name, *_): return None +def _NoDiskComputeMinMaxSpec(name, *_): + if name == constants.ISPEC_DISK_COUNT: + return name + else: + return None + + class _SpecWrapper: def __init__(self, spec): self.spec = spec @@ -663,47 +670,87 @@ class _SpecWrapper: class TestComputeIPolicySpecViolation(unittest.TestCase): + # Minimal policy accepted by _ComputeIPolicySpecViolation() + _MICRO_IPOL = { + constants.IPOLICY_DTS: [constants.DT_PLAIN, constants.DT_DISKLESS], + } + def test(self): compute_fn = _ValidateComputeMinMaxSpec - ret = cmdlib._ComputeIPolicySpecViolation(NotImplemented, 1024, 1, 1, 1, - [1024], 1, _compute_fn=compute_fn) + ret = cmdlib._ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1, + [1024], 1, constants.DT_PLAIN, + _compute_fn=compute_fn) self.assertEqual(ret, []) + def testDiskFull(self): + compute_fn = _NoDiskComputeMinMaxSpec + ret = cmdlib._ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1, + [1024], 1, constants.DT_PLAIN, + _compute_fn=compute_fn) + self.assertEqual(ret, [constants.ISPEC_DISK_COUNT]) + + def testDiskLess(self): + compute_fn = _NoDiskComputeMinMaxSpec + ret = cmdlib._ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1, + [1024], 1, constants.DT_DISKLESS, + _compute_fn=compute_fn) + self.assertEqual(ret, []) + + def testWrongTemplates(self): + compute_fn = _ValidateComputeMinMaxSpec + ret = cmdlib._ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1, + [1024], 1, constants.DT_DRBD8, + _compute_fn=compute_fn) + self.assertEqual(len(ret), 1) + self.assertTrue("Disk template" in ret[0]) + def testInvalidArguments(self): self.assertRaises(AssertionError, cmdlib._ComputeIPolicySpecViolation, - NotImplemented, 1024, 1, 
1, 1, [], 1) + self._MICRO_IPOL, 1024, 1, 1, 1, [], 1, + constants.DT_PLAIN,) def testInvalidSpec(self): spec = _SpecWrapper([None, False, "foo", None, "bar", None]) compute_fn = spec.ComputeMinMaxSpec - ret = cmdlib._ComputeIPolicySpecViolation(NotImplemented, 1024, 1, 1, 1, - [1024], 1, _compute_fn=compute_fn) + ret = cmdlib._ComputeIPolicySpecViolation(self._MICRO_IPOL, 1024, 1, 1, 1, + [1024], 1, constants.DT_PLAIN, + _compute_fn=compute_fn) self.assertEqual(ret, ["foo", "bar"]) self.assertFalse(spec.spec) class _StubComputeIPolicySpecViolation: def __init__(self, mem_size, cpu_count, disk_count, nic_count, disk_sizes, - spindle_use): + spindle_use, disk_template): self.mem_size = mem_size self.cpu_count = cpu_count self.disk_count = disk_count self.nic_count = nic_count self.disk_sizes = disk_sizes self.spindle_use = spindle_use + self.disk_template = disk_template def __call__(self, _, mem_size, cpu_count, disk_count, nic_count, disk_sizes, - spindle_use): + spindle_use, disk_template): assert self.mem_size == mem_size assert self.cpu_count == cpu_count assert self.disk_count == disk_count assert self.nic_count == nic_count assert self.disk_sizes == disk_sizes assert self.spindle_use == spindle_use + assert self.disk_template == disk_template return [] +class _FakeConfigForComputeIPolicyInstanceViolation: + def __init__(self, be): + self.cluster = objects.Cluster(beparams={"default": be}) + + def GetClusterInfo(self): + return self.cluster + + class TestComputeIPolicyInstanceViolation(unittest.TestCase): def test(self): beparams = { @@ -712,10 +759,18 @@ class TestComputeIPolicyInstanceViolation(unittest.TestCase): constants.BE_SPINDLE_USE: 4, } disks = [objects.Disk(size=512)] - instance = objects.Instance(beparams=beparams, disks=disks, nics=[]) - stub = _StubComputeIPolicySpecViolation(2048, 2, 1, 0, [512], 4) + cfg = _FakeConfigForComputeIPolicyInstanceViolation(beparams) + instance = objects.Instance(beparams=beparams, disks=disks, nics=[], + disk_template=constants.DT_PLAIN) + stub = _StubComputeIPolicySpecViolation(2048, 2, 1, 0, [512], 4, + constants.DT_PLAIN) ret = cmdlib._ComputeIPolicyInstanceViolation(NotImplemented, instance, - _compute_fn=stub) + cfg, _compute_fn=stub) + self.assertEqual(ret, []) + instance2 = objects.Instance(beparams={}, disks=disks, nics=[], + disk_template=constants.DT_PLAIN) + ret = cmdlib._ComputeIPolicyInstanceViolation(NotImplemented, instance2, + cfg, _compute_fn=stub) self.assertEqual(ret, []) @@ -729,8 +784,10 @@ class TestComputeIPolicyInstanceSpecViolation(unittest.TestCase): constants.ISPEC_NIC_COUNT: 0, constants.ISPEC_SPINDLE_USE: 1, } - stub = _StubComputeIPolicySpecViolation(2048, 2, 1, 0, [512], 1) + stub = _StubComputeIPolicySpecViolation(2048, 2, 1, 0, [512], 1, + constants.DT_PLAIN) ret = cmdlib._ComputeIPolicyInstanceSpecViolation(NotImplemented, ispec, + constants.DT_PLAIN, _compute_fn=stub) self.assertEqual(ret, []) @@ -751,14 +808,14 @@ class TestComputeIPolicyNodeViolation(unittest.TestCase): def testSameGroup(self): ret = cmdlib._ComputeIPolicyNodeViolation(NotImplemented, NotImplemented, - "foo", "foo", + "foo", "foo", NotImplemented, _compute_fn=self.recorder) self.assertFalse(self.recorder.called) self.assertEqual(ret, []) def testDifferentGroup(self): ret = cmdlib._ComputeIPolicyNodeViolation(NotImplemented, NotImplemented, - "foo", "bar", + "foo", "bar", NotImplemented, _compute_fn=self.recorder) self.assertTrue(self.recorder.called) self.assertEqual(ret, []) @@ -783,7 +840,7 @@ class 
TestCheckTargetNodeIPolicy(unittest.TestCase): def testNoViolation(self): compute_recoder = _CallRecorder(return_value=[]) cmdlib._CheckTargetNodeIPolicy(self.lu, NotImplemented, self.instance, - self.target_node, + self.target_node, NotImplemented, _compute_fn=compute_recoder) self.assertTrue(compute_recoder.called) self.assertEqual(self.lu.warning_log, []) @@ -792,15 +849,15 @@ class TestCheckTargetNodeIPolicy(unittest.TestCase): compute_recoder = _CallRecorder(return_value=["mem_size not in range"]) self.assertRaises(errors.OpPrereqError, cmdlib._CheckTargetNodeIPolicy, self.lu, NotImplemented, self.instance, self.target_node, - _compute_fn=compute_recoder) + NotImplemented, _compute_fn=compute_recoder) self.assertTrue(compute_recoder.called) self.assertEqual(self.lu.warning_log, []) def testIgnoreViolation(self): compute_recoder = _CallRecorder(return_value=["mem_size not in range"]) cmdlib._CheckTargetNodeIPolicy(self.lu, NotImplemented, self.instance, - self.target_node, ignore=True, - _compute_fn=compute_recoder) + self.target_node, NotImplemented, + ignore=True, _compute_fn=compute_recoder) self.assertTrue(compute_recoder.called) msg = ("Instance does not meet target node group's (bar) instance policy:" " mem_size not in range") @@ -1671,5 +1728,105 @@ class TestVerifyErrors(unittest.TestCase): self.assertTrue(self._ERR1ID in lu.msglist[0]) +class TestGetUpdatedIPolicy(unittest.TestCase): + """Tests for cmdlib._GetUpdatedIPolicy()""" + _OLD_CLUSTER_POLICY = { + constants.IPOLICY_VCPU_RATIO: 1.5, + constants.ISPECS_MIN: { + constants.ISPEC_MEM_SIZE: 20, + constants.ISPEC_CPU_COUNT: 2, + }, + constants.ISPECS_MAX: {}, + constants.ISPECS_STD: {}, + } + + _OLD_GROUP_POLICY = { + constants.IPOLICY_SPINDLE_RATIO: 2.5, + constants.ISPECS_MIN: { + constants.ISPEC_DISK_SIZE: 20, + constants.ISPEC_NIC_COUNT: 2, + }, + constants.ISPECS_MAX: {}, + } + + def _TestSetSpecs(self, old_policy, isgroup): + ispec_key = constants.ISPECS_MIN + diff_ispec = { + constants.ISPEC_MEM_SIZE: 50, + constants.ISPEC_DISK_SIZE: 30, + } + diff_policy = { + ispec_key: diff_ispec + } + new_policy = cmdlib._GetUpdatedIPolicy(old_policy, diff_policy, + group_policy=isgroup) + new_ispec = new_policy[ispec_key] + for key in diff_ispec: + self.assertTrue(key in new_ispec) + self.assertEqual(new_ispec[key], diff_ispec[key]) + for key in old_policy: + if not key in diff_policy: + self.assertTrue(key in new_policy) + self.assertEqual(new_policy[key], old_policy[key]) + old_ispec = old_policy[ispec_key] + for key in old_ispec: + if not key in diff_ispec: + self.assertTrue(key in new_ispec) + self.assertEqual(new_ispec[key], old_ispec[key]) + + def _TestSet(self, old_policy, isgroup): + diff_policy = { + constants.IPOLICY_VCPU_RATIO: 3, + constants.IPOLICY_SPINDLE_RATIO: 1.9, + } + new_policy = cmdlib._GetUpdatedIPolicy(old_policy, diff_policy, + group_policy=isgroup) + for key in diff_policy: + self.assertTrue(key in new_policy) + self.assertEqual(new_policy[key], diff_policy[key]) + for key in old_policy: + if not key in diff_policy: + self.assertTrue(key in new_policy) + self.assertEqual(new_policy[key], old_policy[key]) + + def testSet(self): + self._TestSet(self._OLD_GROUP_POLICY, True) + self._TestSetSpecs(self._OLD_GROUP_POLICY, True) + self._TestSet(self._OLD_CLUSTER_POLICY, False) + self._TestSetSpecs(self._OLD_CLUSTER_POLICY, False) + + def testUnset(self): + old_policy = self._OLD_GROUP_POLICY + diff_policy = { + constants.IPOLICY_SPINDLE_RATIO: constants.VALUE_DEFAULT, + } + new_policy = 
cmdlib._GetUpdatedIPolicy(old_policy, diff_policy, + group_policy=True) + for key in diff_policy: + self.assertFalse(key in new_policy) + for key in old_policy: + if not key in diff_policy: + self.assertTrue(key in new_policy) + self.assertEqual(new_policy[key], old_policy[key]) + + def _TestInvalidKeys(self, old_policy, isgroup): + INVALID_DICT = { + "this_key_shouldnt_be_allowed": 3, + } + invalid_policy = INVALID_DICT + self.assertRaises(errors.OpPrereqError, cmdlib._GetUpdatedIPolicy, + old_policy, invalid_policy, group_policy=isgroup) + for key in constants.IPOLICY_ISPECS: + invalid_ispec = { + key: INVALID_DICT, + } + self.assertRaises(errors.TypeEnforcementError, cmdlib._GetUpdatedIPolicy, + old_policy, invalid_ispec, group_policy=isgroup) + + def testInvalidKeys(self): + self._TestInvalidKeys(self._OLD_GROUP_POLICY, True) + self._TestInvalidKeys(self._OLD_CLUSTER_POLICY, False) + + if __name__ == "__main__": testutils.GanetiTestProgram() diff --git a/test/py/ganeti.objects_unittest.py b/test/py/ganeti.objects_unittest.py index fc9a6027cc694bcd4e4b4d5a7e3ab7ed2647268e..a90afb2d261ff801cdac2cb9cb24cd689b585ab3 100755 --- a/test/py/ganeti.objects_unittest.py +++ b/test/py/ganeti.objects_unittest.py @@ -215,6 +215,13 @@ class TestClusterObject(unittest.TestCase): self.fake_cl.enabled_hypervisors = sorted(constants.HYPER_TYPES) self.assertEqual(self.fake_cl.primary_hypervisor, constants.HT_CHROOT) + def testUpgradeConfig(self): + # FIXME: This test is incomplete + cluster = objects.Cluster() + cluster.UpgradeConfig() + cluster = objects.Cluster(ipolicy={"unknown_key": None}) + self.assertRaises(errors.ConfigurationError, cluster.UpgradeConfig) + class TestClusterObjectTcpUdpPortPool(unittest.TestCase): def testNewCluster(self): @@ -400,5 +407,165 @@ class TestNode(unittest.TestCase): self.assertTrue(constants.ND_SPINDLE_COUNT in node2.ndparams) +class TestInstancePolicy(unittest.TestCase): + def setUp(self): + # Policies are big, and we want to see the difference in case of an error + self.maxDiff = None + + def _AssertIPolicyIsFull(self, policy): + self.assertEqual(frozenset(policy.keys()), constants.IPOLICY_ALL_KEYS) + for key in constants.IPOLICY_ISPECS: + spec = policy[key] + self.assertEqual(frozenset(spec.keys()), constants.ISPECS_PARAMETERS) + + def testDefaultIPolicy(self): + objects.InstancePolicy.CheckParameterSyntax(constants.IPOLICY_DEFAULTS, + True) + self._AssertIPolicyIsFull(constants.IPOLICY_DEFAULTS) + + def testCheckISpecSyntax(self): + par = "my_parameter" + for check_std in [True, False]: + if check_std: + allkeys = constants.IPOLICY_ISPECS + else: + allkeys = constants.IPOLICY_ISPECS - frozenset([constants.ISPECS_STD]) + # Only one policy limit + for key in allkeys: + policy = dict((k, {}) for k in allkeys) + policy[key][par] = 11 + objects.InstancePolicy.CheckISpecSyntax(policy, par, check_std) + # Min and max only + good_values = [(11, 11), (11, 40), (0, 0)] + for (mn, mx) in good_values: + policy = dict((k, {}) for k in allkeys) + policy[constants.ISPECS_MIN][par] = mn + policy[constants.ISPECS_MAX][par] = mx + objects.InstancePolicy.CheckISpecSyntax(policy, par, check_std) + policy = dict((k, {}) for k in allkeys) + policy[constants.ISPECS_MIN][par] = 11 + policy[constants.ISPECS_MAX][par] = 5 + self.assertRaises(errors.ConfigurationError, + objects.InstancePolicy.CheckISpecSyntax, + policy, par, check_std) + # Min, std, max + good_values = [ + (11, 11, 11), + (11, 11, 40), + (11, 40, 40), + ] + for (mn, st, mx) in good_values: + policy = { + 
constants.ISPECS_MIN: {par: mn}, + constants.ISPECS_STD: {par: st}, + constants.ISPECS_MAX: {par: mx}, + } + objects.InstancePolicy.CheckISpecSyntax(policy, par, True) + bad_values = [ + (11, 11, 5), + (40, 11, 11), + (11, 80, 40), + (11, 5, 40), + (11, 5, 5), + (40, 40, 11), + ] + for (mn, st, mx) in bad_values: + policy = { + constants.ISPECS_MIN: {par: mn}, + constants.ISPECS_STD: {par: st}, + constants.ISPECS_MAX: {par: mx}, + } + self.assertRaises(errors.ConfigurationError, + objects.InstancePolicy.CheckISpecSyntax, + policy, par, True) + + def testCheckDiskTemplates(self): + invalid = "this_is_not_a_good_template" + for dt in constants.DISK_TEMPLATES: + objects.InstancePolicy.CheckDiskTemplates([dt]) + objects.InstancePolicy.CheckDiskTemplates(list(constants.DISK_TEMPLATES)) + bad_examples = [ + [invalid], + [constants.DT_DRBD8, invalid], + list(constants.DISK_TEMPLATES) + [invalid], + [], + None, + ] + for dtl in bad_examples: + self.assertRaises(errors.ConfigurationError, + objects.InstancePolicy.CheckDiskTemplates, + dtl) + + def testCheckParameterSyntax(self): + invalid = "this_key_shouldnt_be_here" + for check_std in [True, False]: + self.assertRaises(KeyError, + objects.InstancePolicy.CheckParameterSyntax, + {}, check_std) + policy = objects.MakeEmptyIPolicy() + policy[invalid] = None + self.assertRaises(errors.ConfigurationError, + objects.InstancePolicy.CheckParameterSyntax, + policy, check_std) + for par in constants.IPOLICY_PARAMETERS: + policy = objects.MakeEmptyIPolicy() + for val in ("blah", None, {}, [42]): + policy[par] = val + self.assertRaises(errors.ConfigurationError, + objects.InstancePolicy.CheckParameterSyntax, + policy, check_std) + + def testFillIPolicyEmpty(self): + policy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, {}) + objects.InstancePolicy.CheckParameterSyntax(policy, True) + self.assertEqual(policy, constants.IPOLICY_DEFAULTS) + + def _AssertISpecsMerged(self, default_spec, diff_spec, merged_spec): + for (param, value) in merged_spec.items(): + if param in diff_spec: + self.assertEqual(value, diff_spec[param]) + else: + self.assertEqual(value, default_spec[param]) + + def _AssertIPolicyMerged(self, default_pol, diff_pol, merged_pol): + for (key, value) in merged_pol.items(): + if key in diff_pol: + if key in constants.IPOLICY_ISPECS: + self._AssertISpecsMerged(default_pol[key], diff_pol[key], value) + else: + self.assertEqual(value, diff_pol[key]) + else: + self.assertEqual(value, default_pol[key]) + + def testFillIPolicy(self): + partial_policies = [ + {constants.IPOLICY_VCPU_RATIO: 3.14}, + {constants.IPOLICY_SPINDLE_RATIO: 2.72}, + {constants.IPOLICY_DTS: [constants.DT_FILE]}, + ] + for diff_pol in partial_policies: + policy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, diff_pol) + objects.InstancePolicy.CheckParameterSyntax(policy, True) + self._AssertIPolicyIsFull(policy) + self._AssertIPolicyMerged(constants.IPOLICY_DEFAULTS, diff_pol, policy) + + def testFillIPolicySpecs(self): + partial_policies = [ + {constants.ISPECS_MIN: {constants.ISPEC_MEM_SIZE: 32}, + constants.ISPECS_MAX: {constants.ISPEC_CPU_COUNT: 1024}}, + {constants.ISPECS_STD: {constants.ISPEC_DISK_SIZE: 2048}, + constants.ISPECS_MAX: { + constants.ISPEC_DISK_COUNT: constants.MAX_DISKS - 1, + constants.ISPEC_NIC_COUNT: constants.MAX_NICS - 1, + }}, + {constants.ISPECS_STD: {constants.ISPEC_SPINDLE_USE: 3}}, + ] + for diff_pol in partial_policies: + policy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, diff_pol) + objects.InstancePolicy.CheckParameterSyntax(policy, True) 
+ self._AssertIPolicyIsFull(policy) + self._AssertIPolicyMerged(constants.IPOLICY_DEFAULTS, diff_pol, policy) + + if __name__ == "__main__": testutils.GanetiTestProgram()
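
Reviewer note, appended after the patch rather than inside it: the central behavioural change above is that `_ComputeIPolicySpecViolation` now also receives the instance's disk template, rejects templates not listed in the policy's `IPOLICY_DTS`, and skips the disk-count check for diskless instances; in addition, `_ComputeIPolicyInstanceViolation` now reads memory/CPU/spindle values from the cluster-filled beparams (`FillBE`) instead of the raw instance dict, and `_GetUpdatedIPolicy` runs `ForceDictType` on the merged ispec rather than on the partial update. The snippet below is a minimal, self-contained sketch of that first check only, with hard-coded stand-ins for the constants and for the `_ComputeMinMaxSpec` callback; it is an illustration of the idea, not the patched cmdlib code.

```python
# Sketch of the disk-template-aware policy check added by this patch.
# DT_DISKLESS / IPOLICY_DTS mirror Ganeti constants but are hard-coded here;
# compute_spec_violations and check_spec_fn are placeholder names, not the
# real cmdlib helpers.

DT_DISKLESS = "diskless"
IPOLICY_DTS = "disk-templates"


def compute_spec_violations(ipolicy, disk_template, disk_count, specs,
                            check_spec_fn):
    """Return a list of human-readable policy violations.

    specs is a list of (name, qualifier, value) tuples; check_spec_fn is a
    callback that returns an error string for a violated limit or None.
    """
    violations = []

    # New in this patch: the disk template itself must be allowed by policy
    allowed = ipolicy[IPOLICY_DTS]
    if disk_template not in allowed:
        violations.append("Disk template %s is not allowed (allowed: %s)" %
                          (disk_template, ", ".join(allowed)))

    # New in this patch: the disk-count limit is meaningless for diskless
    # instances, so it is only checked for templates that actually use disks
    if disk_template != DT_DISKLESS:
        specs = specs + [("disk-count", "", disk_count)]

    violations.extend(res for res in
                      (check_spec_fn(name, qual, ipolicy, value)
                       for (name, qual, value) in specs)
                      if res)
    return violations


if __name__ == "__main__":
    ipolicy = {IPOLICY_DTS: ["plain", "drbd"]}
    no_limit = lambda name, qual, pol, val: None  # stand-in for _ComputeMinMaxSpec
    print(compute_spec_violations(ipolicy, "file", 1,
                                  [("mem-size", "", 128)], no_limit))
    # -> ['Disk template file is not allowed (allowed: plain, drbd)']
```

The QA additions (`TestClusterModifyIPolicy`, `TestClusterModifyISpecs`, `TestIPolicyPlainInstance`) exercise exactly this path end-to-end: they tighten the cluster specs around an existing plain-disk instance, expect `CV_EINSTANCEPOLICY` warnings from cluster-verify, and then replay the same policy changes to confirm that new instance creations fail where the running instance was flagged.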