From 6fe4baf041f54e5e95160e9ae70a56ec99d96ad5 Mon Sep 17 00:00:00 2001
From: Michael Hanselmann <hansmi@google.com>
Date: Wed, 21 Mar 2012 18:11:48 +0100
Subject: [PATCH] Revert "Stop acquiring BGL for LUXI queries"

This reverts commit 0fa753bad2cf5a0cf88953347e5da3aebbf21956. Turns out
there are more queries acquiring locks than we'd like. This patch goes
to version 2.6 and a separate patch fixes the immediate issues in
LUClusterVerifyConfig.

Signed-off-by: Michael Hanselmann <hansmi@google.com>
Reviewed-by: Bernardo Dal Seno <bdalseno@google.com>
---
 lib/mcpu.py           | 38 ++++++++------------------------------
 lib/server/masterd.py |  2 +-
 2 files changed, 9 insertions(+), 31 deletions(-)

diff --git a/lib/mcpu.py b/lib/mcpu.py
index 3ed4ca50a..89046c60c 100644
--- a/lib/mcpu.py
+++ b/lib/mcpu.py
@@ -176,7 +176,7 @@ class Processor(object):
   """Object which runs OpCodes"""
   DISPATCH_TABLE = _ComputeDispatchTable()
 
-  def __init__(self, context, ec_id, enable_locks=True):
+  def __init__(self, context, ec_id):
     """Constructor for Processor
 
     @type context: GanetiContext
@@ -190,16 +190,6 @@ class Processor(object):
     self._cbs = None
     self.rpc = rpc.RpcRunner(context.cfg)
     self.hmclass = HooksMaster
-    self._enable_locks = enable_locks
-
-  def _CheckLocksEnabled(self):
-    """Checks if locking is enabled.
-
-    @raise errors.ProgrammerError: In case locking is not enabled
-
-    """
-    if not self._enable_locks:
-      raise errors.ProgrammerError("Attempted to use disabled locks")
 
   def _AcquireLocks(self, level, names, shared, timeout, priority):
     """Acquires locks via the Ganeti lock manager.
@@ -216,8 +206,6 @@ class Processor(object):
         amount of time
 
     """
-    self._CheckLocksEnabled()
-
     if self._cbs:
       self._cbs.CheckCancel()
 
@@ -303,8 +291,6 @@ class Processor(object):
                                  " others")
 
       elif adding_locks or acquiring_locks:
-        self._CheckLocksEnabled()
-
         lu.DeclareLocks(level)
         share = lu.share_locks[level]
 
@@ -375,17 +361,12 @@ class Processor(object):
       self._cbs = cbs
 
     try:
-      if self._enable_locks:
-        # Acquire the Big Ganeti Lock exclusively if this LU requires it,
-        # and in a shared fashion otherwise (to prevent concurrent run with
-        # an exclusive LU.
-        self._AcquireLocks(locking.LEVEL_CLUSTER, locking.BGL,
-                           not lu_class.REQ_BGL, calc_timeout(),
-                           priority)
-      elif lu_class.REQ_BGL:
-        raise errors.ProgrammerError("Opcode '%s' requires BGL, but locks are"
-                                     " disabled" % op.OP_ID)
-
+      # Acquire the Big Ganeti Lock exclusively if this LU requires it,
+      # and in a shared fashion otherwise (to prevent concurrent run with
+      # an exclusive LU.
+      self._AcquireLocks(locking.LEVEL_CLUSTER, locking.BGL,
+                         not lu_class.REQ_BGL, calc_timeout(),
+                         priority)
       try:
         lu = lu_class(self, op, self.context, self.rpc)
         lu.ExpandNames()
@@ -398,10 +379,7 @@ class Processor(object):
           if self._ec_id:
             self.context.cfg.DropECReservations(self._ec_id)
       finally:
-        # Release BGL if owned
-        if self.context.glm.is_owned(locking.LEVEL_CLUSTER):
-          assert self._enable_locks
-          self.context.glm.release(locking.LEVEL_CLUSTER)
+        self.context.glm.release(locking.LEVEL_CLUSTER)
     finally:
       self._cbs = None
 
diff --git a/lib/server/masterd.py b/lib/server/masterd.py
index e5d8c8833..27bf561c4 100644
--- a/lib/server/masterd.py
+++ b/lib/server/masterd.py
@@ -361,7 +361,7 @@ class ClientOps:
 
     """
     # Queries don't have a job id
-    proc = mcpu.Processor(self.server.context, None, enable_locks=False)
+    proc = mcpu.Processor(self.server.context, None)
 
     # TODO: Executing an opcode using locks will acquire them in blocking mode.
     # Consider using a timeout for retries.
-- 
GitLab
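
For context, the sketch below is a minimal, self-contained model (not Ganeti
code; BigGanetiLock and ToyProcessor are invented names) of the locking
behaviour this revert restores: every opcode execution, LUXI queries included,
acquires the Big Ganeti Lock, shared unless the LU declares REQ_BGL, and the
enable_locks=False bypass no longer exists.

  import threading


  class BigGanetiLock(object):
    """Toy shared/exclusive lock standing in for Ganeti's lock manager."""

    def __init__(self):
      self._cond = threading.Condition()
      self._readers = 0
      self._writer = False

    def acquire(self, shared):
      with self._cond:
        if shared:
          # Shared acquisition waits only for an exclusive holder
          while self._writer:
            self._cond.wait()
          self._readers += 1
        else:
          # Exclusive acquisition waits for all holders
          while self._writer or self._readers:
            self._cond.wait()
          self._writer = True

    def release(self):
      with self._cond:
        if self._writer:
          self._writer = False
        else:
          self._readers -= 1
        self._cond.notify_all()


  class ToyProcessor(object):
    """Mirrors mcpu.Processor after the revert: no enable_locks switch."""

    def __init__(self, bgl):
      self._bgl = bgl

    def ExecOpCode(self, lu_fn, req_bgl=False):
      # Exclusive BGL if the LU requires it, shared otherwise; queries go
      # through the same path because the bypass removed here is gone.
      self._bgl.acquire(shared=not req_bgl)
      try:
        return lu_fn()
      finally:
        self._bgl.release()


  if __name__ == "__main__":
    bgl = BigGanetiLock()
    proc = ToyProcessor(bgl)
    # A query-style LU now takes the BGL in shared mode instead of skipping it.
    print(proc.ExecOpCode(lambda: "query result"))
    # An LU with REQ_BGL takes it exclusively.
    print(proc.ExecOpCode(lambda: "cluster-wide op", req_bgl=True))

The real Processor releases the lock through context.glm.release(
locking.LEVEL_CLUSTER) as shown in the hunk above; the sketch only models the
shared-versus-exclusive decision that queries are again subject to.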