From 04e1bfaf386bb8487c81d0dead016f19e85717cb Mon Sep 17 00:00:00 2001
From: Guido Trotter <ultrotter@google.com>
Date: Wed, 23 Jul 2008 14:23:05 +0000
Subject: [PATCH] Invert nodes/instances locking order

An implementation mistake in the original design caused nodes to be
locked before instances, rather than after. This patch inverts the level
numbering, also updating the relevant unit tests and the starting point
of the recursive locking function.

Reviewed-by: iustinp
---
 lib/locking.py                  |  8 ++++----
 lib/mcpu.py                     |  2 +-
 test/ganeti.locking_unittest.py | 11 +++++------
 3 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/lib/locking.py b/lib/locking.py
index 733fa8451..2144735c7 100644
--- a/lib/locking.py
+++ b/lib/locking.py
@@ -670,12 +670,12 @@ class LockSet:
 #   If you need more than one node, or more than one instance, acquire them at
 #   the same time.
 LEVEL_CLUSTER = 0
-LEVEL_NODE = 1
-LEVEL_INSTANCE = 2
+LEVEL_INSTANCE = 1
+LEVEL_NODE = 2
 
 LEVELS = [LEVEL_CLUSTER,
-          LEVEL_NODE,
-          LEVEL_INSTANCE]
+          LEVEL_INSTANCE,
+          LEVEL_NODE]
 
 # Lock levels which are modifiable
 LEVELS_MOD = [LEVEL_NODE, LEVEL_INSTANCE]
diff --git a/lib/mcpu.py b/lib/mcpu.py
index f9e1d84bf..d78adb46d 100644
--- a/lib/mcpu.py
+++ b/lib/mcpu.py
@@ -176,7 +176,7 @@ class Processor(object):
       lu = lu_class(self, op, self.context, sstore)
       lu.ExpandNames()
       assert lu.needed_locks is not None, "needed_locks not set by LU"
-      result = self._LockAndExecLU(lu, locking.LEVEL_NODE)
+      result = self._LockAndExecLU(lu, locking.LEVEL_INSTANCE)
     finally:
       self.context.glm.release(locking.LEVEL_CLUSTER)
       self.exclusive_BGL = False
diff --git a/test/ganeti.locking_unittest.py b/test/ganeti.locking_unittest.py
index 195b27fb3..9da130300 100755
--- a/test/ganeti.locking_unittest.py
+++ b/test/ganeti.locking_unittest.py
@@ -621,14 +621,14 @@ class TestGanetiLockManager(unittest.TestCase):
   def testAcquireRelease(self):
     self.GL.acquire(locking.LEVEL_CLUSTER, ['BGL'], shared=1)
     self.assertEquals(self.GL._list_owned(locking.LEVEL_CLUSTER), set(['BGL']))
+    self.GL.acquire(locking.LEVEL_INSTANCE, ['i1'])
     self.GL.acquire(locking.LEVEL_NODE, ['n1', 'n2'], shared=1)
-    self.GL.release(locking.LEVEL_NODE)
-    self.GL.acquire(locking.LEVEL_NODE, ['n1'])
+    self.GL.release(locking.LEVEL_NODE, ['n2'])
     self.assertEquals(self.GL._list_owned(locking.LEVEL_NODE), set(['n1']))
-    self.GL.acquire(locking.LEVEL_INSTANCE, ['i1', 'i2'])
-    self.GL.release(locking.LEVEL_INSTANCE, ['i2'])
     self.assertEquals(self.GL._list_owned(locking.LEVEL_INSTANCE), set(['i1']))
     self.GL.release(locking.LEVEL_NODE)
+    self.assertEquals(self.GL._list_owned(locking.LEVEL_NODE), set())
+    self.assertEquals(self.GL._list_owned(locking.LEVEL_INSTANCE), set(['i1']))
     self.GL.release(locking.LEVEL_INSTANCE)
     self.assertRaises(errors.LockError, self.GL.acquire,
                       locking.LEVEL_INSTANCE, ['i5'])
@@ -656,7 +656,7 @@ class TestGanetiLockManager(unittest.TestCase):
 
   def testWrongOrder(self):
     self.GL.acquire(locking.LEVEL_CLUSTER, ['BGL'], shared=1)
-    self.GL.acquire(locking.LEVEL_INSTANCE, ['i3'])
+    self.GL.acquire(locking.LEVEL_NODE, ['n2'])
     self.assertRaises(AssertionError, self.GL.acquire,
                       locking.LEVEL_NODE, ['n1'])
     self.assertRaises(AssertionError, self.GL.acquire,
@@ -678,7 +678,6 @@ class TestGanetiLockManager(unittest.TestCase):
     self.GL.acquire(locking.LEVEL_CLUSTER, ['BGL'], shared=1)
     Thread(target=self._doLock, args=(locking.LEVEL_INSTANCE, 'i1', 1)).start()
     self.assertEqual(self.done.get(True, 1), 'DONE')
-    self.GL.acquire(locking.LEVEL_NODE, ['n1'])
     self.GL.acquire(locking.LEVEL_INSTANCE, ['i3'])
     Thread(target=self._doLock, args=(locking.LEVEL_INSTANCE, 'i1', 1)).start()
     self.assertEqual(self.done.get(True, 1), 'DONE')
-- 
GitLab