diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index f8db0d1e0337a816de22a8af8f1f6d58855a9580..39a5d67e342142262b3b7883d678960027f5c6d3 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -2452,13 +2452,14 @@ class LUQueryInstances(NoHooksLU):
 
   """
   _OP_REQP = ["output_fields", "names"]
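+  # The BGL is not needed in exclusive mode: this is a read-only query,
+  # and the fine-grained locks it needs are declared in ExpandNames below.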
+  REQ_BGL = False
 
-  def CheckPrereq(self):
-    """Check prerequisites.
-
-    This checks that the fields required are valid output fields.
-
-    """
+  def ExpandNames(self):
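+    """Check the output fields and compute the locks to acquire.
+
+    """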
     self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
     _CheckOutputFields(static=["name", "os", "pnode", "snodes",
                                "admin_state", "admin_ram",
@@ -2467,7 +2468,42 @@
                        dynamic=self.dynamic_fields,
                        selected=self.op.output_fields)
 
-    self.wanted = _GetWantedInstances(self, self.op.names)
+    self.needed_locks = {}
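+    # Queries never write to the cluster state, so all locks can be taken
+    # in shared mode, letting us run in parallel with other read-only jobs.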
+    self.share_locks[locking.LEVEL_INSTANCE] = 1
+    self.share_locks[locking.LEVEL_NODE] = 1
+
+    # TODO: we could lock instances (and nodes) only if the user asked for
+    # dynamic fields. For that we need atomic ways to get info for a group of
+    # instances from the config, though.
+    if not self.op.names:
+      self.needed_locks[locking.LEVEL_INSTANCE] = None  # Acquire all
+    else:
+      self.needed_locks[locking.LEVEL_INSTANCE] = \
+        _GetWantedInstances(self, self.op.names)
+
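+    # The node locks can't be computed here, as they depend on which
+    # instance locks end up being acquired; DeclareLocks fills them in.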
+    self.needed_locks[locking.LEVEL_NODE] = []
+    self.recalculate_locks[locking.LEVEL_NODE] = 'replace'
+
+  def DeclareLocks(self, level):
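+    # Called by the processor just before each lock level is acquired; at
+    # LEVEL_NODE the instance locks are already held, so the node list can
+    # be computed from them ('replace' swaps in exactly those nodes).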
+    # TODO: locking of nodes could be avoided when not querying them
+    if level == locking.LEVEL_NODE:
+      self._LockInstancesNodes()
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    """
+    # This is only valid because we hold the instance locks: the processor
+    # stores the acquired names back into needed_locks, so a None ("acquire
+    # all") has by now been resolved into the actual instance names.
+    self.wanted = self.needed_locks[locking.LEVEL_INSTANCE]
 
   def Exec(self, feedback_fn):
     """Computes the list of nodes and their attributes.