From 35705d8fa97ea397857f048b90735ba802003bc4 Mon Sep 17 00:00:00 2001
From: Guido Trotter <ultrotter@google.com>
Date: Mon, 18 Aug 2008 12:51:58 +0000
Subject: [PATCH] Parallelize LUQueryNodes

As with LUQueryInstances, the first version just acquires a shared lock on all
nodes. In the future further optimizations are possible, as outlined by
comments in the code.

Reviewed-by: imsnah
---
 lib/cmdlib.py | 26 +++++++++++++++++++-------
 1 file changed, 19 insertions(+), 7 deletions(-)

diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 39a5d67e3..a60c5270d 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -1366,13 +1366,9 @@ class LUQueryNodes(NoHooksLU):
 
   """
   _OP_REQP = ["output_fields", "names"]
+  REQ_BGL = False
 
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks that the fields required are valid output fields.
-
-    """
+  def ExpandNames(self):
     self.dynamic_fields = frozenset([
       "dtotal", "dfree",
       "mtotal", "mnode", "mfree",
@@ -1386,7 +1382,23 @@ class LUQueryNodes(NoHooksLU):
                        dynamic=self.dynamic_fields,
                        selected=self.op.output_fields)
 
-    self.wanted = _GetWantedNodes(self, self.op.names)
+    self.needed_locks = {}
+    self.share_locks[locking.LEVEL_NODE] = 1
+    # TODO: we could lock nodes only if the user asked for dynamic fields. For
+    # that we need atomic ways to get info for a group of nodes from the
+    # config, though.
+    if not self.op.names:
+      self.needed_locks[locking.LEVEL_NODE] = None
+    else:
+      self.needed_locks[locking.LEVEL_NODE] = \
+        _GetWantedNodes(self, self.op.names)
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    """
+    # This of course is valid only if we locked the nodes
+    self.wanted = self.needed_locks[locking.LEVEL_NODE]
 
   def Exec(self, feedback_fn):
     """Computes the list of nodes and their attributes.
-- 
GitLab