diff --git a/lib/mcpu.py b/lib/mcpu.py
index 89046c60cdf64e575cce43b592969530b790e0ea..3ed4ca50a6ffa6c51331a70a62f0ae610ab9fc1e 100644
--- a/lib/mcpu.py
+++ b/lib/mcpu.py
@@ -176,7 +176,7 @@ class Processor(object):
   """Object which runs OpCodes"""
   DISPATCH_TABLE = _ComputeDispatchTable()
 
-  def __init__(self, context, ec_id):
+  def __init__(self, context, ec_id, enable_locks=True):
     """Constructor for Processor
 
     @type context: GanetiContext
@@ -190,6 +190,16 @@ class Processor(object):
     self._cbs = None
     self.rpc = rpc.RpcRunner(context.cfg)
     self.hmclass = HooksMaster
+    self._enable_locks = enable_locks
+
+  def _CheckLocksEnabled(self):
+    """Checks if locking is enabled.
+
+    @raise errors.ProgrammerError: In case locking is not enabled
+
+    """
+    if not self._enable_locks:
+      raise errors.ProgrammerError("Attempted to use disabled locks")
 
   def _AcquireLocks(self, level, names, shared, timeout, priority):
     """Acquires locks via the Ganeti lock manager.
@@ -206,6 +216,8 @@ class Processor(object):
         amount of time
 
     """
+    self._CheckLocksEnabled()
+
     if self._cbs:
       self._cbs.CheckCancel()
 
@@ -291,6 +303,8 @@ class Processor(object):
                                  " others")
 
       elif adding_locks or acquiring_locks:
+        self._CheckLocksEnabled()
+
         lu.DeclareLocks(level)
         share = lu.share_locks[level]
 
@@ -361,12 +375,17 @@ class Processor(object):
 
     self._cbs = cbs
     try:
-      # Acquire the Big Ganeti Lock exclusively if this LU requires it,
-      # and in a shared fashion otherwise (to prevent concurrent run with
-      # an exclusive LU.
-      self._AcquireLocks(locking.LEVEL_CLUSTER, locking.BGL,
-                         not lu_class.REQ_BGL, calc_timeout(),
-                         priority)
+      if self._enable_locks:
+        # Acquire the Big Ganeti Lock exclusively if this LU requires it,
+        # and in a shared fashion otherwise (to prevent concurrent run with
+        # an exclusive LU.
+        self._AcquireLocks(locking.LEVEL_CLUSTER, locking.BGL,
+                           not lu_class.REQ_BGL, calc_timeout(),
+                           priority)
+      elif lu_class.REQ_BGL:
+        raise errors.ProgrammerError("Opcode '%s' requires BGL, but locks are"
+                                     " disabled" % op.OP_ID)
+
       try:
         lu = lu_class(self, op, self.context, self.rpc)
         lu.ExpandNames()
@@ -379,7 +398,10 @@ class Processor(object):
           if self._ec_id:
             self.context.cfg.DropECReservations(self._ec_id)
       finally:
-        self.context.glm.release(locking.LEVEL_CLUSTER)
+        # Release BGL if owned
+        if self.context.glm.is_owned(locking.LEVEL_CLUSTER):
+          assert self._enable_locks
+          self.context.glm.release(locking.LEVEL_CLUSTER)
     finally:
       self._cbs = None
 
diff --git a/lib/server/masterd.py b/lib/server/masterd.py
index 27bf561c4bb681cd287110594c0db1a025bfc692..e5d8c8833a1862edeaf625ab9d14a7fde8e250a7 100644
--- a/lib/server/masterd.py
+++ b/lib/server/masterd.py
@@ -361,7 +361,7 @@ class ClientOps:
 
     """
     # Queries don't have a job id
-    proc = mcpu.Processor(self.server.context, None)
+    proc = mcpu.Processor(self.server.context, None, enable_locks=False)
 
     # TODO: Executing an opcode using locks will acquire them in blocking mode.
     # Consider using a timeout for retries.
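
For illustration, the sketch below condenses the pattern this patch introduces: a Processor-level enable_locks flag, a _CheckLocksEnabled() guard that turns any lock operation into a ProgrammerError while locks are disabled, an up-front rejection of operations that require the Big Ganeti Lock, and a release step that only fires when the lock is actually owned. This is a minimal stand-alone sketch under those assumptions, not Ganeti code; the names SimpleProcessor, LockManager, _AcquireBigLock, run and requires_bgl are made up for the example, and only the shape of the control flow mirrors the diff above.

# Stand-alone sketch of the enable_locks pattern (hypothetical names, not
# Ganeti code).  Lock-free processors fail loudly on any lock operation
# instead of deadlocking or silently skipping synchronisation.

import threading


class ProgrammerError(Exception):
  """Stand-in for errors.ProgrammerError."""


class LockManager(object):
  """Tiny stand-in for the Ganeti lock manager (context.glm)."""

  def __init__(self):
    self._bgl = threading.Lock()
    self._owned = False

  def acquire(self):
    self._bgl.acquire()
    self._owned = True

  def release(self):
    self._owned = False
    self._bgl.release()

  def is_owned(self):
    return self._owned


class SimpleProcessor(object):
  """Mimics the control flow added to mcpu.Processor."""

  def __init__(self, locks, enable_locks=True):
    self._locks = locks
    self._enable_locks = enable_locks

  def _CheckLocksEnabled(self):
    """Refuses any lock operation when locking is disabled."""
    if not self._enable_locks:
      raise ProgrammerError("Attempted to use disabled locks")

  def _AcquireBigLock(self):
    """Guarded acquire, analogous to _AcquireLocks in the patch."""
    self._CheckLocksEnabled()
    self._locks.acquire()

  def run(self, fn, requires_bgl=False):
    """Runs fn, taking the big lock only when locking is enabled."""
    if self._enable_locks:
      self._AcquireBigLock()
    elif requires_bgl:
      # Mirrors the REQ_BGL check: such an operation cannot run on a
      # lock-less processor.
      raise ProgrammerError("Operation requires the big lock, but locks"
                            " are disabled")
    try:
      return fn()
    finally:
      # Release only if actually owned, as in the patched finally block.
      if self._locks.is_owned():
        assert self._enable_locks
        self._locks.release()


if __name__ == "__main__":
  locks = LockManager()
  # Query-style usage, like masterd's lock-less query processor.
  print(SimpleProcessor(locks, enable_locks=False).run(lambda: "query"))
  # Default usage: the big lock is held around the operation.
  print(SimpleProcessor(locks).run(lambda: "locked op"))

The assert in the release path encodes the same invariant the patched finally block asserts: ownership of the big lock implies that locking was enabled in the first place.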