From 3977a4c14b93ea833d9e0e1efb9bf2bede70b576 Mon Sep 17 00:00:00 2001
From: Guido Trotter <ultrotter@google.com>
Date: Wed, 30 Jul 2008 11:30:10 +0000
Subject: [PATCH] Make sharing locks possible

LUs can declare which locks they need by populating the
self.needed_locks dictionary, but those locks are always acquired
exclusively. Make it possible to acquire shared locks as well, by
declaring a particular level as shared in the self.share_locks
dictionary. By default this dictionary is populated so that all locks
are acquired exclusively.

Reviewed-by: iustinp
---
 lib/cmdlib.py | 5 +++++
 lib/mcpu.py   | 5 ++++-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 4d172eefc..a6d759f07 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -83,6 +83,7 @@ class LogicalUnit(object):
     self.sstore = sstore
     self.context = context
     self.needed_locks = None
+    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
     self.__ssh = None
 
     for attr_name in self._OP_REQP:
@@ -128,6 +129,10 @@ class LogicalUnit(object):
         (this reflects what LockSet does, and will be replaced before
         CheckPrereq with the full list of nodes that have been locked)
 
+    If you need to share locks (rather than acquire them exclusively) at one
+    level, you can modify self.share_locks, setting a true value (usually 1)
+    for that level. By default, locks are not shared.
+
     Examples:
     # Acquire all nodes and one instance
     self.needed_locks = {
diff --git a/lib/mcpu.py b/lib/mcpu.py
index 66f657d48..93dfaea9f 100644
--- a/lib/mcpu.py
+++ b/lib/mcpu.py
@@ -134,10 +134,13 @@ class Processor(object):
       # This gives a chance to LUs to make last-minute changes after acquiring
       # locks at any preceding level.
       lu.DeclareLocks(level)
+      needed_locks = lu.needed_locks[level]
+      share = lu.share_locks[level]
       # This is always safe to do, as we can't acquire more/less locks than
       # what was requested.
       lu.needed_locks[level] = self.context.glm.acquire(level,
-                                                        lu.needed_locks[level])
+                                                        needed_locks,
+                                                        shared=share)
       try:
         result = self._LockAndExecLU(lu, level + 1)
       finally:
-- 
GitLab