Commit 00828204 authored by Klaus Aehlig

Clean up remove_locks



Locks are now identified by their name and are no longer
a resource in the master daemon. In particular, there is
no longer any need to remove locks explicitly; freeing
them is enough.
Signed-off-by: Klaus Aehlig <aehlig@google.com>
Reviewed-by: Petr Pudlak <pudlak@google.com>
parent c877d159
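The reasoning behind the cleanup, restated: once a lock is identified
purely by its name, rather than being a resource the master daemon has
to create and destroy, releasing the lock already makes it disappear,
so the remove_locks bookkeeping in the LUs and the processor becomes
dead weight. A minimal sketch of such a name-keyed lock set follows;
the class and method names are hypothetical and not taken from the
Ganeti sources.

# Illustrative sketch only: NameLockSet and its methods are made up
# for this example; they are not part of the code touched here.
import threading


class NameLockSet(object):
  """Locks identified purely by their name.

  A lock is nothing more than an entry keyed by its name, so releasing
  it also removes every trace of it; there is no separate removal step.
  """

  def __init__(self):
    self._mutex = threading.Lock()
    self._held = {}  # lock name -> owner

  def acquire(self, name, owner):
    """Try to take the named lock for the given owner."""
    with self._mutex:
      if name in self._held:
        return False
      self._held[name] = owner
      return True

  def release(self, name, owner):
    """Free the named lock; the entry disappears with it."""
    with self._mutex:
      if self._held.get(name) == owner:
        del self._held[name]

Under such a model an LU that deletes an object no longer has to
schedule the corresponding lock for removal; the lock ceases to exist
as soon as it is released when the LU finishes, which is exactly what
the hunks below delete.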
@@ -133,7 +133,6 @@ class LogicalUnit(object):
     self.opportunistic_locks = dict.fromkeys(locking.LEVELS, False)
     self.add_locks = {}
-    self.remove_locks = {}
     # Used to force good behavior when calling helper functions
     self.recalculate_locks = {}
...
@@ -182,7 +182,6 @@ class LUGroupAdd(LogicalUnit):
                                   disk_state_static=self.new_disk_state)
     self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
-    del self.remove_locks[locking.LEVEL_NODEGROUP]
     network_name = self.cfg.GetClusterInfo().instance_communication_network
     if network_name:
@@ -570,8 +569,6 @@ class LUGroupRemove(LogicalUnit):
       raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
                                (self.op.group_name, self.group_uuid))
 
-    self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
 
 
 class LUGroupRename(LogicalUnit):
   HPATH = "group-rename"
...
@@ -1368,8 +1368,6 @@ class LUInstanceCreate(LogicalUnit):
         for disk_uuid in instance.disks:
           self.cfg.RemoveInstanceDisk(instance.uuid, disk_uuid)
         self.cfg.RemoveInstance(instance.uuid)
-        # Make sure the instance lock gets removed
-        self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
         raise errors.OpExecError("There are some degraded disks for"
                                  " this instance")
@@ -1460,10 +1458,6 @@ class LUInstanceCreate(LogicalUnit):
     # re-read the instance from the configuration
     iobj = self.cfg.GetInstanceInfo(iobj.uuid)
 
-    # Declare that we don't want to remove the instance lock anymore, as we've
-    # added the instance to the config
-    del self.remove_locks[locking.LEVEL_INSTANCE]
-
     if self.op.mode == constants.INSTANCE_IMPORT:
       # Release unused nodes
       ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
...
@@ -251,12 +251,6 @@ def RemoveInstance(lu, feedback_fn, instance, ignore_failures):
   logging.info("Removing instance %s out of cluster config", instance.name)
   lu.cfg.RemoveInstance(instance.uuid)
 
-  assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
-    "Instance lock removal conflict"
-
-  # Remove lock for the instance
-  lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
-
 
 def RemoveDisks(lu, instance, target_node_uuid=None, ignore_failures=False):
   """Remove all disks for an instance.
...
@@ -196,7 +196,6 @@ class LUNetworkAdd(LogicalUnit):
         nobj.AddTag(tag)
 
     self.cfg.AddNetwork(nobj, self.proc.GetECId(), check_uuid=False)
-    del self.remove_locks[locking.LEVEL_NETWORK]
 
 
 class LUNetworkRemove(LogicalUnit):
...
@@ -510,7 +510,6 @@ class Processor(object):
         else:
           # Adding locks
           needed_locks = _LockList(lu.add_locks[level])
-          lu.remove_locks[level] = needed_locks
 
         use_opportunistic = False
         self._AcquireLocks(level, needed_locks, share, use_opportunistic,
...
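As a usage-level restatement of what the diff above removes (purely
illustrative; the dict-based lock table is hypothetical, not Ganeti's
actual lock manager):

# Hypothetical demonstration: with name-keyed locks, freeing and
# removing are the same operation, so LUs such as LUGroupRemove lose
# their remove_locks bookkeeping without losing any behaviour.
locks = {}                            # lock name -> owner

locks["group-X"] = "LUGroupRemove"    # acquire the lock by name
# ... the LU deletes the group from the configuration here ...
del locks["group-X"]                  # releasing frees *and* removes it

assert "group-X" not in locks         # nothing left to clean up afterwards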