From 2477c1c5aa9448f70e562ea494ee8899981423d6 Mon Sep 17 00:00:00 2001
From: Bernardo Dal Seno <bdalseno@google.com>
Date: Thu, 7 Mar 2013 03:14:56 +0100
Subject: [PATCH] Fix instance policy checks for default back-end parameters

Instance policy violations involving back-end parameters that fell back
to the cluster default values were not reported by cluster-verify: the
checks read the values straight from instance.beparams, so inherited
defaults appeared as missing. The ipolicy helpers now receive the
cluster configuration and use FillBE() to check the effective,
fully-filled parameter values.
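
For illustration, a minimal, self-contained sketch of the behavioural
difference (fill_be() is a hypothetical stand-in that mimics the merge
semantics of objects.Cluster.FillBE(); it is not the real
implementation):

    # Hypothetical sketch: reading instance.beparams directly misses
    # parameters inherited from the cluster defaults, while a
    # FillBE-style merge returns the effective value.
    CLUSTER_DEFAULT_BEPARAMS = {"maxmem": 128, "vcpus": 1}

    def fill_be(instance_beparams):
        """Overlay instance overrides on top of the cluster defaults."""
        filled = dict(CLUSTER_DEFAULT_BEPARAMS)
        filled.update(instance_beparams)
        return filled

    instance_beparams = {}  # instance relies entirely on the defaults

    # Old check: the default-valued parameter is invisible.
    assert instance_beparams.get("maxmem") is None

    # New check: the filled value is compared against the policy.
    assert fill_be(instance_beparams)["maxmem"] == 128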

Signed-off-by: Bernardo Dal Seno <bdalseno@google.com>
Reviewed-by: Guido Trotter <ultrotter@google.com>
---
 lib/cmdlib.py                     | 55 ++++++++++++++++++-------------
 test/py/ganeti.cmdlib_unittest.py | 28 ++++++++++++----
 2 files changed, 54 insertions(+), 29 deletions(-)

diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index e3b8927ef..560059d9e 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -1278,7 +1278,7 @@ def _ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
                        for (name, qualifier, value) in test_settings))
 
 
-def _ComputeIPolicyInstanceViolation(ipolicy, instance,
+def _ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
                                      _compute_fn=_ComputeIPolicySpecViolation):
   """Compute if instance meets the specs of ipolicy.
 
@@ -1286,13 +1286,16 @@ def _ComputeIPolicyInstanceViolation(ipolicy, instance,
   @param ipolicy: The ipolicy to verify against
   @type instance: L{objects.Instance}
   @param instance: The instance to verify
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration
   @param _compute_fn: The function to verify ipolicy (unittest only)
   @see: L{_ComputeIPolicySpecViolation}
 
   """
-  mem_size = instance.beparams.get(constants.BE_MAXMEM, None)
-  cpu_count = instance.beparams.get(constants.BE_VCPUS, None)
-  spindle_use = instance.beparams.get(constants.BE_SPINDLE_USE, None)
+  be_full = cfg.GetClusterInfo().FillBE(instance)
+  mem_size = be_full[constants.BE_MAXMEM]
+  cpu_count = be_full[constants.BE_VCPUS]
+  spindle_use = be_full[constants.BE_SPINDLE_USE]
   disk_count = len(instance.disks)
   disk_sizes = [disk.size for disk in instance.disks]
   nic_count = len(instance.nics)
@@ -1329,7 +1332,7 @@ def _ComputeIPolicyInstanceSpecViolation(
 
 
 def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
-                                 target_group,
+                                 target_group, cfg,
                                  _compute_fn=_ComputeIPolicyInstanceViolation):
   """Compute if instance meets the specs of the new target group.
 
@@ -1337,6 +1340,8 @@ def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
   @param instance: The instance object to verify
   @param current_group: The current group of the instance
   @param target_group: The new group of the instance
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration
   @param _compute_fn: The function to verify ipolicy (unittest only)
   @see: L{_ComputeIPolicySpecViolation}
 
@@ -1344,23 +1349,25 @@ def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
   if current_group == target_group:
     return []
   else:
-    return _compute_fn(ipolicy, instance)
+    return _compute_fn(ipolicy, instance, cfg)
 
 
-def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, ignore=False,
+def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, cfg, ignore=False,
                             _compute_fn=_ComputeIPolicyNodeViolation):
   """Checks that the target node is correct in terms of instance policy.
 
   @param ipolicy: The ipolicy to verify
   @param instance: The instance object to verify
   @param node: The new node to relocate
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration
   @param ignore: Ignore violations of the ipolicy
   @param _compute_fn: The function to verify ipolicy (unittest only)
   @see: L{_ComputeIPolicySpecViolation}
 
   """
   primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
-  res = _compute_fn(ipolicy, instance, primary_node.group, node.group)
+  res = _compute_fn(ipolicy, instance, primary_node.group, node.group, cfg)
 
   if res:
     msg = ("Instance does not meet target node group's (%s) instance"
@@ -1371,18 +1378,20 @@ def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, ignore=False,
       raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
 
 
-def _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances):
+def _ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances, cfg):
   """Computes a set of any instances that would violate the new ipolicy.
 
   @param old_ipolicy: The current (still in-place) ipolicy
   @param new_ipolicy: The new (to become) ipolicy
   @param instances: List of instances to verify
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration
   @return: A list of instances which violates the new ipolicy but
       did not before
 
   """
-  return (_ComputeViolatingInstances(new_ipolicy, instances) -
-          _ComputeViolatingInstances(old_ipolicy, instances))
+  return (_ComputeViolatingInstances(new_ipolicy, instances, cfg) -
+          _ComputeViolatingInstances(old_ipolicy, instances, cfg))
 
 
 def _ExpandItemName(fn, name, kind):
@@ -1661,17 +1670,19 @@ def _DecideSelfPromotion(lu, exceptions=None):
   return mc_now < mc_should
 
 
-def _ComputeViolatingInstances(ipolicy, instances):
+def _ComputeViolatingInstances(ipolicy, instances, cfg):
   """Computes a set of instances who violates given ipolicy.
 
   @param ipolicy: The ipolicy to verify
-  @type instances: object.Instance
+  @type instances: L{objects.Instance}
   @param instances: List of instances to verify
+  @type cfg: L{config.ConfigWriter}
+  @param cfg: Cluster configuration
   @return: A frozenset of instance names violating the ipolicy
 
   """
   return frozenset([inst.name for inst in instances
-                    if _ComputeIPolicyInstanceViolation(ipolicy, inst)])
+                    if _ComputeIPolicyInstanceViolation(ipolicy, inst, cfg)])
 
 
 def _CheckNicsBridgesExist(lu, target_nics, target_node):
@@ -2640,7 +2651,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
     cluster = self.cfg.GetClusterInfo()
     ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                             self.group_info)
-    err = _ComputeIPolicyInstanceViolation(ipolicy, inst_config)
+    err = _ComputeIPolicyInstanceViolation(ipolicy, inst_config, self.cfg)
     _ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance, utils.CommaJoin(err),
              code=self.ETYPE_WARNING)
 
@@ -4280,7 +4291,7 @@ class LUClusterSetParams(LogicalUnit):
         new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
         ipol = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group)
         new = _ComputeNewInstanceViolations(ipol,
-                                            new_ipolicy, instances)
+                                            new_ipolicy, instances, self.cfg)
         if new:
           violations.update(new)
 
@@ -8401,7 +8412,7 @@ class LUInstanceMove(LogicalUnit):
     cluster = self.cfg.GetClusterInfo()
     group_info = self.cfg.GetNodeGroup(node.group)
     ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
-    _CheckTargetNodeIPolicy(self, ipolicy, instance, node,
+    _CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
                             ignore=self.op.ignore_ipolicy)
 
     if instance.admin_state == constants.ADMINST_UP:
@@ -8676,7 +8687,7 @@ class TLMigrateInstance(Tasklet):
       group_info = self.cfg.GetNodeGroup(nodeinfo.group)
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               group_info)
-      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
+      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, self.cfg,
                               ignore=self.ignore_ipolicy)
 
       # self.target_node is already populated, either directly or by the
@@ -8720,7 +8731,7 @@ class TLMigrateInstance(Tasklet):
       group_info = self.cfg.GetNodeGroup(nodeinfo.group)
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               group_info)
-      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
+      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, self.cfg,
                               ignore=self.ignore_ipolicy)
 
     i_be = cluster.FillBE(instance)
@@ -11729,7 +11740,7 @@ class TLReplaceDisks(Tasklet):
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               new_group_info)
       _CheckTargetNodeIPolicy(self, ipolicy, instance, self.remote_node_info,
-                              ignore=self.ignore_ipolicy)
+                              self.cfg, ignore=self.ignore_ipolicy)
 
     for node in check_nodes:
       _CheckNodeOnline(self.lu, node)
@@ -13577,7 +13588,7 @@ class LUInstanceSetParams(LogicalUnit):
       snode_group = self.cfg.GetNodeGroup(snode_info.group)
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               snode_group)
-      _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info,
+      _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
                               ignore=self.op.ignore_ipolicy)
       if pnode_info.group != snode_info.group:
         self.LogWarning("The primary and secondary nodes are in two"
@@ -15349,7 +15360,7 @@ class LUGroupSetParams(LogicalUnit):
       violations = \
           _ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
                                                                   self.group),
-                                        new_ipolicy, instances)
+                                        new_ipolicy, instances, self.cfg)
 
       if violations:
         self.LogWarning("After the ipolicy change the following instances"
diff --git a/test/py/ganeti.cmdlib_unittest.py b/test/py/ganeti.cmdlib_unittest.py
index 544d93014..85f1969a9 100755
--- a/test/py/ganeti.cmdlib_unittest.py
+++ b/test/py/ganeti.cmdlib_unittest.py
@@ -743,6 +743,14 @@ class _StubComputeIPolicySpecViolation:
     return []
 
 
+class _FakeConfigForComputeIPolicyInstanceViolation:
+  def __init__(self, be):
+    self.cluster = objects.Cluster(beparams={"default": be})
+
+  def GetClusterInfo(self):
+    return self.cluster
+
+
 class TestComputeIPolicyInstanceViolation(unittest.TestCase):
   def test(self):
     beparams = {
@@ -751,12 +759,18 @@ class TestComputeIPolicyInstanceViolation(unittest.TestCase):
       constants.BE_SPINDLE_USE: 4,
       }
     disks = [objects.Disk(size=512)]
+    cfg = _FakeConfigForComputeIPolicyInstanceViolation(beparams)
     instance = objects.Instance(beparams=beparams, disks=disks, nics=[],
                                 disk_template=constants.DT_PLAIN)
     stub = _StubComputeIPolicySpecViolation(2048, 2, 1, 0, [512], 4,
                                             constants.DT_PLAIN)
     ret = cmdlib._ComputeIPolicyInstanceViolation(NotImplemented, instance,
-                                                  _compute_fn=stub)
+                                                  cfg, _compute_fn=stub)
+    self.assertEqual(ret, [])
+    instance2 = objects.Instance(beparams={}, disks=disks, nics=[],
+                                 disk_template=constants.DT_PLAIN)
+    ret = cmdlib._ComputeIPolicyInstanceViolation(NotImplemented, instance2,
+                                                  cfg, _compute_fn=stub)
     self.assertEqual(ret, [])
 
 
@@ -794,14 +808,14 @@ class TestComputeIPolicyNodeViolation(unittest.TestCase):
 
   def testSameGroup(self):
     ret = cmdlib._ComputeIPolicyNodeViolation(NotImplemented, NotImplemented,
-                                              "foo", "foo",
+                                              "foo", "foo", NotImplemented,
                                               _compute_fn=self.recorder)
     self.assertFalse(self.recorder.called)
     self.assertEqual(ret, [])
 
   def testDifferentGroup(self):
     ret = cmdlib._ComputeIPolicyNodeViolation(NotImplemented, NotImplemented,
-                                              "foo", "bar",
+                                              "foo", "bar", NotImplemented,
                                               _compute_fn=self.recorder)
     self.assertTrue(self.recorder.called)
     self.assertEqual(ret, [])
@@ -826,7 +840,7 @@ class TestCheckTargetNodeIPolicy(unittest.TestCase):
   def testNoViolation(self):
     compute_recoder = _CallRecorder(return_value=[])
     cmdlib._CheckTargetNodeIPolicy(self.lu, NotImplemented, self.instance,
-                                   self.target_node,
+                                   self.target_node, NotImplemented,
                                    _compute_fn=compute_recoder)
     self.assertTrue(compute_recoder.called)
     self.assertEqual(self.lu.warning_log, [])
@@ -835,15 +849,15 @@ class TestCheckTargetNodeIPolicy(unittest.TestCase):
     compute_recoder = _CallRecorder(return_value=["mem_size not in range"])
     self.assertRaises(errors.OpPrereqError, cmdlib._CheckTargetNodeIPolicy,
                       self.lu, NotImplemented, self.instance, self.target_node,
-                      _compute_fn=compute_recoder)
+                      NotImplemented, _compute_fn=compute_recoder)
     self.assertTrue(compute_recoder.called)
     self.assertEqual(self.lu.warning_log, [])
 
   def testIgnoreViolation(self):
     compute_recoder = _CallRecorder(return_value=["mem_size not in range"])
     cmdlib._CheckTargetNodeIPolicy(self.lu, NotImplemented, self.instance,
-                                   self.target_node, ignore=True,
-                                   _compute_fn=compute_recoder)
+                                   self.target_node, NotImplemented,
+                                   ignore=True, _compute_fn=compute_recoder)
     self.assertTrue(compute_recoder.called)
     msg = ("Instance does not meet target node group's (bar) instance policy:"
            " mem_size not in range")
-- 
GitLab