Commit da4a52a3 authored by Thomas Thrainer

Index instances by their UUID



Index instances by their UUID instead of their name in the cluster
config. This change touches large parts of the code, as the following
adjustments were necessary:
 * Change the index key to UUID in the configuration and the
   ConfigWriter, including all methods.
 * External interfaces (command line interface, IAllocator interface,
   hook scripts, etc.) are kept stable.
 * Instance UUIDs are resolved in ExpandNames and then stored in the
   OpCode. This allows checking for instance renames if the OpCode is
   reloaded after a cluster restart. This check is currently only done
   for single-instance parameters (see the sketch after this list).
 * Instance locking unfortunately can't use instance UUIDs as
   identifiers. The reason is that new instances (which have no UUID
   yet) have to be locked as well, so the instance name is used.
 * Variable names are renamed to follow this pattern:
   - Suffix is 'inst' or 'insts': variable holds Instance objects
   - Suffix is 'name' or 'names': variable holds instance names
   - Suffix is 'uuid' or 'uuids': variable holds instance UUIDs
 * Tests are adapted.
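
A minimal sketch of the resolve-and-verify pattern described above. The
Config class, its data layout and the OpPrereqError stand-in below are
simplified placeholders, not the real ganeti ConfigWriter or errors API:

# Simplified stand-ins; not the real ganeti ConfigWriter/errors API.
class OpPrereqError(Exception):
  """Placeholder for errors.OpPrereqError."""


class Config(object):
  """Toy config that indexes instances by UUID, like the new layout."""

  def __init__(self, instances):
    # instances: dict mapping instance UUID -> full instance name
    self._instances = instances

  def ExpandInstanceName(self, short_name):
    """Return (uuid, full_name) for a possibly shortened instance name."""
    for (uuid, name) in self._instances.items():
      if name == short_name or name.startswith(short_name + "."):
        return (uuid, name)
    raise OpPrereqError("Instance '%s' not known" % short_name)


def ExpandInstanceUuidAndName(cfg, expected_uuid, name):
  """Resolve an instance name to (uuid, full_name).

  If expected_uuid is not None (e.g. taken from an OpCode stored before a
  cluster restart), verify that the name still resolves to the same
  instance; a mismatch indicates a rename in the meantime.
  """
  (uuid, full_name) = cfg.ExpandInstanceName(name)
  if expected_uuid is not None and uuid != expected_uuid:
    raise OpPrereqError("Instance '%s' changed since the job was submitted"
                        " (UUID %s does not match %s)" %
                        (full_name, uuid, expected_uuid))
  return (uuid, full_name)


if __name__ == "__main__":
  cfg = Config({"9f1c5a3e-7c4d-4a2b-8c3d-0123456789ab": "web1.example.com"})
  print(ExpandInstanceUuidAndName(cfg, None, "web1"))

Running this prints the (uuid, full name) tuple; passing a different
expected_uuid raises the stand-in OpPrereqError, mirroring how a reloaded
OpCode detects an intervening rename.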
Signed-off-by: Thomas Thrainer <thomasth@google.com>
Reviewed-by: Klaus Aehlig <aehlig@google.com>
parent b691385f
......@@ -136,7 +136,7 @@ class LUBackupPrepare(NoHooksLU):
"""Check prerequisites.
"""
self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
self.instance = self.cfg.GetInstanceInfoByName(self.op.instance_name)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
CheckNodeOnline(self, self.instance.primary_node)
......@@ -259,7 +259,7 @@ class LUBackupExport(LogicalUnit):
This checks that the instance and node names are valid.
"""
self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
self.instance = self.cfg.GetInstanceInfoByName(self.op.instance_name)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
CheckNodeOnline(self, self.instance.primary_node)
......@@ -504,13 +504,13 @@ class LUBackupRemove(NoHooksLU):
"""Remove any export.
"""
instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
(_, inst_name) = self.cfg.ExpandInstanceName(self.op.instance_name)
# If the instance was not found we'll try with the name that was passed in.
# This will only work if it was an FQDN, though.
fqdn_warn = False
if not instance_name:
if not inst_name:
fqdn_warn = True
instance_name = self.op.instance_name
inst_name = self.op.instance_name
locked_nodes = self.owned_locks(locking.LEVEL_NODE)
exportlist = self.rpc.call_export_list(locked_nodes)
......@@ -521,13 +521,13 @@ class LUBackupRemove(NoHooksLU):
self.LogWarning("Failed to query node %s (continuing): %s",
self.cfg.GetNodeName(node_uuid), msg)
continue
if instance_name in exportlist[node_uuid].payload:
if inst_name in exportlist[node_uuid].payload:
found = True
result = self.rpc.call_export_remove(node_uuid, instance_name)
result = self.rpc.call_export_remove(node_uuid, inst_name)
msg = result.fail_msg
if msg:
logging.error("Could not remove export for instance %s"
" on node %s: %s", instance_name,
" on node %s: %s", inst_name,
self.cfg.GetNodeName(node_uuid), msg)
if fqdn_warn and not found:
......
......@@ -28,7 +28,7 @@ from ganeti import constants
from ganeti import locking
from ganeti import query
from ganeti import utils
from ganeti.cmdlib.common import ExpandInstanceName
from ganeti.cmdlib.common import ExpandInstanceUuidAndName
class ResultWithJobs:
......@@ -322,8 +322,9 @@ class LogicalUnit(object):
else:
assert locking.LEVEL_INSTANCE not in self.needed_locks, \
"_ExpandAndLockInstance called with instance-level locks set"
self.op.instance_name = ExpandInstanceName(self.cfg,
self.op.instance_name)
(self.op.instance_uuid, self.op.instance_name) = \
ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
self.op.instance_name)
self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
def _LockInstancesNodes(self, primary_only=False,
......@@ -361,7 +362,7 @@ class LogicalUnit(object):
# of self.recalculate_locks[locking.LEVEL_NODE]
wanted_node_uuids = []
locked_i = self.owned_locks(locking.LEVEL_INSTANCE)
for _, instance in self.cfg.GetMultiInstanceInfo(locked_i):
for _, instance in self.cfg.GetMultiInstanceInfoByName(locked_i):
wanted_node_uuids.append(instance.primary_node)
if not primary_only:
wanted_node_uuids.extend(instance.secondary_nodes)
......
......@@ -65,9 +65,15 @@ def _ExpandItemName(expand_fn, name, kind):
return full_name
def ExpandInstanceName(cfg, name):
def ExpandInstanceUuidAndName(cfg, expected_uuid, name):
"""Wrapper over L{_ExpandItemName} for instance."""
return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
(uuid, full_name) = _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
if expected_uuid is not None and uuid != expected_uuid:
raise errors.OpPrereqError(
"The instances UUID '%s' does not match the expected UUID '%s' for"
" instance '%s'. Maybe the instance changed since you submitted this"
" job." % (uuid, expected_uuid, full_name), errors.ECODE_NOTUNIQUE)
return (uuid, full_name)
def ExpandNodeUuidAndName(cfg, expected_uuid, name):
......@@ -99,25 +105,26 @@ def ShareAll():
return dict.fromkeys(locking.LEVELS, 1)
def CheckNodeGroupInstances(cfg, group_uuid, owned_instances):
def CheckNodeGroupInstances(cfg, group_uuid, owned_instance_names):
"""Checks if the instances in a node group are still correct.
@type cfg: L{config.ConfigWriter}
@param cfg: The cluster configuration
@type group_uuid: string
@param group_uuid: Node group UUID
@type owned_instances: set or frozenset
@param owned_instances: List of currently owned instances
@type owned_instance_names: set or frozenset
@param owned_instance_names: List of currently owned instances
"""
wanted_instances = cfg.GetNodeGroupInstances(group_uuid)
if owned_instances != wanted_instances:
wanted_instances = frozenset(cfg.GetInstanceNames(
cfg.GetNodeGroupInstances(group_uuid)))
if owned_instance_names != wanted_instances:
raise errors.OpPrereqError("Instances in node group '%s' changed since"
" locks were acquired, wanted '%s', have '%s';"
" retry the operation" %
(group_uuid,
utils.CommaJoin(wanted_instances),
utils.CommaJoin(owned_instances)),
utils.CommaJoin(owned_instance_names)),
errors.ECODE_STATE)
return wanted_instances
......@@ -144,24 +151,25 @@ def GetWantedNodes(lu, short_node_names):
return (node_uuids, [lu.cfg.GetNodeName(uuid) for uuid in node_uuids])
def GetWantedInstances(lu, instances):
def GetWantedInstances(lu, short_inst_names):
"""Returns list of checked and expanded instance names.
@type lu: L{LogicalUnit}
@param lu: the logical unit on whose behalf we execute
@type instances: list
@param instances: list of instance names or None for all instances
@rtype: list
@return: the list of instances, sorted
@type short_inst_names: list
@param short_inst_names: list of instance names or None for all instances
@rtype: tuple of lists
@return: tuple of (instance UUIDs, instance names)
@raise errors.OpPrereqError: if the instances parameter is wrong type
@raise errors.OpPrereqError: if any of the passed instances is not found
"""
if instances:
wanted = [ExpandInstanceName(lu.cfg, name) for name in instances]
if short_inst_names:
inst_uuids = [ExpandInstanceUuidAndName(lu.cfg, None, name)[0]
for name in short_inst_names]
else:
wanted = utils.NiceSort(lu.cfg.GetInstanceList())
return wanted
inst_uuids = lu.cfg.GetInstanceList()
return (inst_uuids, [lu.cfg.GetInstanceName(uuid) for uuid in inst_uuids])
def RunPostHook(lu, node_name):
......@@ -794,7 +802,7 @@ def CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_node_uuids,
@type cfg: L{config.ConfigWriter}
@param cfg: Cluster configuration
@type instances: dict; string as key, L{objects.Instance} as value
@param instances: Dictionary, instance name as key, instance object as value
@param instances: Dictionary, instance UUID as key, instance object as value
@type owned_groups: iterable of string
@param owned_groups: List of owned groups
@type owned_node_uuids: iterable of string
......@@ -803,38 +811,37 @@ def CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_node_uuids,
@param cur_group_uuid: Optional group UUID to check against instance's groups
"""
for (name, inst) in instances.items():
for (uuid, inst) in instances.items():
assert owned_node_uuids.issuperset(inst.all_nodes), \
"Instance %s's nodes changed while we kept the lock" % name
"Instance %s's nodes changed while we kept the lock" % inst.name
inst_groups = CheckInstanceNodeGroups(cfg, name, owned_groups)
inst_groups = CheckInstanceNodeGroups(cfg, uuid, owned_groups)
assert cur_group_uuid is None or cur_group_uuid in inst_groups, \
"Instance %s has no node in group %s" % (name, cur_group_uuid)
"Instance %s has no node in group %s" % (inst.name, cur_group_uuid)
def CheckInstanceNodeGroups(cfg, instance_name, owned_groups,
primary_only=False):
def CheckInstanceNodeGroups(cfg, inst_uuid, owned_groups, primary_only=False):
"""Checks if the owned node groups are still correct for an instance.
@type cfg: L{config.ConfigWriter}
@param cfg: The cluster configuration
@type instance_name: string
@param instance_name: Instance name
@type inst_uuid: string
@param inst_uuid: Instance UUID
@type owned_groups: set or frozenset
@param owned_groups: List of currently owned node groups
@type primary_only: boolean
@param primary_only: Whether to check node groups for only the primary node
"""
inst_groups = cfg.GetInstanceNodeGroups(instance_name, primary_only)
inst_groups = cfg.GetInstanceNodeGroups(inst_uuid, primary_only)
if not owned_groups.issuperset(inst_groups):
raise errors.OpPrereqError("Instance %s's node groups changed since"
" locks were acquired, current groups are"
" are '%s', owning groups '%s'; retry the"
" operation" %
(instance_name,
(cfg.GetInstanceName(inst_uuid),
utils.CommaJoin(inst_groups),
utils.CommaJoin(owned_groups)),
errors.ECODE_STATE)
......
......@@ -203,7 +203,8 @@ class LUGroupAssignNodes(NoHooksLU):
self.node_data, instance_data)
if new_splits:
fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))
fmt_new_splits = utils.CommaJoin(utils.NiceSort(
self.cfg.GetInstanceNames(new_splits)))
if not self.op.force:
raise errors.OpExecError("The following instances get split by this"
......@@ -216,7 +217,8 @@ class LUGroupAssignNodes(NoHooksLU):
if previous_splits:
self.LogWarning("In addition, these already-split instances continue"
" to be split across groups: %s",
utils.CommaJoin(utils.NiceSort(previous_splits)))
utils.CommaJoin(utils.NiceSort(
self.cfg.GetInstanceNames(previous_splits))))
def Exec(self, feedback_fn):
"""Assign nodes to a new group.
......@@ -262,11 +264,11 @@ class LUGroupAssignNodes(NoHooksLU):
if len(set(node_data[node_uuid].group
for node_uuid in inst.all_nodes)) > 1:
previously_split_instances.add(inst.name)
previously_split_instances.add(inst.uuid)
if len(set(changed_nodes.get(node_uuid, node_data[node_uuid].group)
for node_uuid in inst.all_nodes)) > 1:
all_split_instances.add(inst.name)
all_split_instances.add(inst.uuid)
return (list(all_split_instances - previously_split_instances),
list(previously_split_instances & all_split_instances))
......@@ -339,7 +341,7 @@ class GroupQuery(QueryBase):
for instance in all_instances.values():
node = instance.primary_node
if node in node_to_group:
group_to_instances[node_to_group[node]].append(instance.name)
group_to_instances[node_to_group[node]].append(instance.uuid)
if not do_nodes:
# Do not pass on node information if it was not requested.
......@@ -412,7 +414,8 @@ class LUGroupSetParams(LogicalUnit):
# Lock instances optimistically, needs verification once group lock has
# been acquired
self.needed_locks[locking.LEVEL_INSTANCE] = \
self.cfg.GetNodeGroupInstances(self.group_uuid)
self.cfg.GetInstanceNames(
self.cfg.GetNodeGroupInstances(self.group_uuid))
@staticmethod
def _UpdateAndVerifyDiskParams(old, new):
......@@ -427,10 +430,10 @@ class LUGroupSetParams(LogicalUnit):
"""Check prerequisites.
"""
owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
# Check if locked instances are still correct
CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instance_names)
self.group = self.cfg.GetNodeGroup(self.group_uuid)
cluster = self.cfg.GetClusterInfo()
......@@ -477,8 +480,7 @@ class LUGroupSetParams(LogicalUnit):
group_policy=True)
new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
inst_filter = lambda inst: inst.name in owned_instances
instances = self.cfg.GetInstancesInfoByFilter(inst_filter).values()
instances = self.cfg.GetMultiInstanceInfoByName(owned_instance_names)
gmi = ganeti.masterd.instance
violations = \
ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
......@@ -709,7 +711,8 @@ class LUGroupEvacuate(LogicalUnit):
# Lock instances optimistically, needs verification once node and group
# locks have been acquired
self.needed_locks[locking.LEVEL_INSTANCE] = \
self.cfg.GetNodeGroupInstances(self.group_uuid)
self.cfg.GetInstanceNames(
self.cfg.GetNodeGroupInstances(self.group_uuid))
elif level == locking.LEVEL_NODEGROUP:
assert not self.needed_locks[locking.LEVEL_NODEGROUP]
......@@ -723,7 +726,9 @@ class LUGroupEvacuate(LogicalUnit):
for instance_name in
self.owned_locks(locking.LEVEL_INSTANCE)
for group_uuid in
self.cfg.GetInstanceNodeGroups(instance_name))
self.cfg.GetInstanceNodeGroups(
self.cfg.GetInstanceInfoByName(instance_name)
.uuid))
else:
# No target groups, need to lock all of them
lock_groups = locking.ALL_SET
......@@ -746,7 +751,7 @@ class LUGroupEvacuate(LogicalUnit):
self.needed_locks[locking.LEVEL_NODE].extend(member_node_uuids)
def CheckPrereq(self):
owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
......@@ -754,10 +759,11 @@ class LUGroupEvacuate(LogicalUnit):
assert self.group_uuid in owned_groups
# Check if locked instances are still correct
CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instance_names)
# Get instance information
self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
self.instances = \
dict(self.cfg.GetMultiInstanceInfoByName(owned_instance_names))
# Check if node groups for locked instances are still correct
CheckInstancesNodeGroups(self.cfg, self.instances,
......@@ -797,11 +803,11 @@ class LUGroupEvacuate(LogicalUnit):
return (run_nodes, run_nodes)
def Exec(self, feedback_fn):
instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
inst_names = list(self.owned_locks(locking.LEVEL_INSTANCE))
assert self.group_uuid not in self.target_uuids
req = iallocator.IAReqGroupChange(instances=instances,
req = iallocator.IAReqGroupChange(instances=inst_names,
target_groups=self.target_uuids)
ial = iallocator.IAllocator(self.cfg, self.rpc, req)
......@@ -851,7 +857,8 @@ class LUGroupVerifyDisks(NoHooksLU):
# Lock instances optimistically, needs verification once node and group
# locks have been acquired
self.needed_locks[locking.LEVEL_INSTANCE] = \
self.cfg.GetNodeGroupInstances(self.group_uuid)
self.cfg.GetInstanceNames(
self.cfg.GetNodeGroupInstances(self.group_uuid))
elif level == locking.LEVEL_NODEGROUP:
assert not self.needed_locks[locking.LEVEL_NODEGROUP]
......@@ -863,7 +870,9 @@ class LUGroupVerifyDisks(NoHooksLU):
# later on
[group_uuid
for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
for group_uuid in self.cfg.GetInstanceNodeGroups(instance_name)])
for group_uuid in
self.cfg.GetInstanceNodeGroups(
self.cfg.GetInstanceInfoByName(instance_name).uuid)])
elif level == locking.LEVEL_NODE:
# This will only lock the nodes in the group to be verified which contain
......@@ -877,17 +886,17 @@ class LUGroupVerifyDisks(NoHooksLU):
self.needed_locks[locking.LEVEL_NODE].extend(member_node_uuids)
def CheckPrereq(self):
owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
owned_inst_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
assert self.group_uuid in owned_groups
# Check if locked instances are still correct
CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_inst_names)
# Get instance information
self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
self.instances = dict(self.cfg.GetMultiInstanceInfoByName(owned_inst_names))
# Check if node groups for locked instances are still correct
CheckInstancesNodeGroups(self.cfg, self.instances,
......
......@@ -48,7 +48,7 @@ from ganeti.cmdlib.common import INSTANCE_DOWN, \
ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName
from ganeti.cmdlib.instance_storage import CreateDisks, \
CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
......@@ -392,10 +392,10 @@ class LUInstanceCreate(LogicalUnit):
# instance name verification
if self.op.name_check:
self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
self.op.instance_name = self.hostname1.name
self.hostname = _CheckHostnameSane(self, self.op.instance_name)
self.op.instance_name = self.hostname.name
# used in CheckPrereq for ip ping check
self.check_ip = self.hostname1.ip
self.check_ip = self.hostname.ip
else:
self.check_ip = None
......@@ -503,7 +503,8 @@ class LUInstanceCreate(LogicalUnit):
# this is just a preventive check, but someone might still add this
# instance in the meantime, and creation will fail at lock-add time
if self.op.instance_name in self.cfg.GetInstanceList():
if self.op.instance_name in\
[inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
self.op.instance_name, errors.ECODE_EXISTS)
......@@ -1188,13 +1189,15 @@ class LUInstanceCreate(LogicalUnit):
else:
network_port = None
instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
# This is ugly but we got a chicken-egg problem here
# We can only take the group disk parameters, as the instance
# has no disks yet (we are generating them right here).
nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
disks = GenerateDiskTemplate(self,
self.op.disk_template,
self.op.instance_name, self.pnode.uuid,
instance_uuid, self.pnode.uuid,
self.secondaries,
self.disks,
self.instance_file_storage_dir,
......@@ -1203,7 +1206,9 @@ class LUInstanceCreate(LogicalUnit):
feedback_fn,
self.cfg.GetGroupDiskParams(nodegroup))
iobj = objects.Instance(name=self.op.instance_name, os=self.op.os_type,
iobj = objects.Instance(name=self.op.instance_name,
uuid=instance_uuid,
os=self.op.os_type,
primary_node=self.pnode.uuid,
nics=self.nics, disks=disks,
disk_template=self.op.disk_template,
......@@ -1281,7 +1286,7 @@ class LUInstanceCreate(LogicalUnit):
if disk_abort:
RemoveDisks(self, iobj)
self.cfg.RemoveInstance(iobj.name)
self.cfg.RemoveInstance(iobj.uuid)
# Make sure the instance lock gets removed
self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
raise errors.OpExecError("There are some degraded disks for"
......@@ -1455,9 +1460,10 @@ class LUInstanceRename(LogicalUnit):
This checks that the instance is in the cluster and is not running.
"""
self.op.instance_name = ExpandInstanceName(self.cfg,
self.op.instance_name)
instance = self.cfg.GetInstanceInfo(self.op.instance_name)
(self.op.instance_uuid, self.op.instance_name) = \
ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
self.op.instance_name)
instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
assert instance is not None
CheckNodeOnline(self, instance.primary_node)
CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
......@@ -1474,8 +1480,9 @@ class LUInstanceRename(LogicalUnit):
(hostname.ip, new_name),
errors.ECODE_NOTUNIQUE)
instance_list = self.cfg.GetInstanceList()
if new_name in instance_list and new_name != instance.name:
instance_names = [inst.name for
inst in self.cfg.GetAllInstancesInfo().values()]
if new_name in instance_names and new_name != instance.name:
raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
new_name, errors.ECODE_EXISTS)
......@@ -1492,7 +1499,7 @@ class LUInstanceRename(LogicalUnit):
self.instance.disks[0].logical_id[1])
rename_file_storage = True
self.cfg.RenameInstance(self.instance.name, self.op.new_name)
self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
# Change the instance lock. This is definitely safe while we hold the BGL.
# Otherwise the new lock would have to be added in acquired mode.
assert self.REQ_BGL
......@@ -1501,7 +1508,7 @@ class LUInstanceRename(LogicalUnit):
self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
# re-read the instance from the configuration after rename
renamed_inst = self.cfg.GetInstanceInfo(self.op.new_name)
renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
if rename_file_storage:
new_file_storage_dir = os.path.dirname(
......@@ -1584,7 +1591,7 @@ class LUInstanceRemove(LogicalUnit):
This checks that the instance is in the cluster.
"""
self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
......@@ -1670,7 +1677,7 @@ class LUInstanceMove(LogicalUnit):
This checks that the instance is in the cluster.
"""
self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
......@@ -1752,7 +1759,7 @@ class LUInstanceMove(LogicalUnit):
CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
except errors.OpExecError:
self.LogWarning("Device creation failed")
self.cfg.ReleaseDRBDMinors(self.instance.name)
self.cfg.ReleaseDRBDMinors(self.instance.uuid)
raise
cluster_name = self.cfg.GetClusterInfo().cluster_name
......@@ -1785,7 +1792,7 @@ class LUInstanceMove(LogicalUnit):
try:
RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
finally:
self.cfg.ReleaseDRBDMinors(self.instance.name)
self.cfg.ReleaseDRBDMinors(self.instance.uuid)
raise errors.OpExecError("Errors during disk copy: %s" %
(",".join(errs),))
......@@ -2376,7 +2383,7 @@ class LUInstanceSetParams(LogicalUnit):
# Acquire locks for the instance's nodegroups optimistically. Needs
# to be verified in CheckPrereq
self.needed_locks[locking.LEVEL_NODEGROUP] = \
self.cfg.GetInstanceNodeGroups(self.op.instance_name)
self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
elif level == locking.LEVEL_NODE:
self._LockInstancesNodes()
if self.op.disk_template and self.op.remote_node:
......@@ -2714,7 +2721,7 @@ class LUInstanceSetParams(LogicalUnit):
"""
assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
self.cluster = self.cfg.GetClusterInfo()
assert self.instance is not None, \
......@@ -3031,7 +3038,7 @@ class LUInstanceSetParams(LogicalUnit):
constants.IDISK_NAME: d.name}
for d in self.instance.disks]
new_disks = GenerateDiskTemplate(self, self.op.disk_template,
self.instance.name, pnode_uuid,
self.instance.uuid, pnode_uuid,
[snode_uuid], disk_info, None, None, 0,
feedback_fn, self.diskparams)
anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
......@@ -3160,7 +3167,7 @@ class LUInstanceSetParams(LogicalUnit):
disk = \
GenerateDiskTemplate(self, self.instance.disk_template,
self.instance.name, self.instance.primary_node,
self.instance.uuid, self.instance.primary_node,
self.instance.secondary_nodes, [params], file_path,
file_driver, idx, self.Log, self.diskparams)[0]
......@@ -3314,7 +3321,7 @@ class LUInstanceSetParams(LogicalUnit):
try:
self._DISK_CONVERSIONS[mode](self, feedback_fn)
except:
self.cfg.ReleaseDRBDMinors(self.instance.name)
self.cfg.ReleaseDRBDMinors(self.instance.uuid)
raise
result.append(("disk_template", self.op.disk_template))
......@@ -3359,11 +3366,11 @@ class LUInstanceSetParams(LogicalUnit):
pass
elif self.op.offline:
# Mark instance as offline
self.cfg.MarkInstanceOffline(self.instance.name)
self.cfg.MarkInstanceOffline(self.instance.uuid)
result.append(("admin_state", constants.ADMINST_OFFLINE))
else:
# Mark instance as online, but stopped
self.cfg.MarkInstanceDown(self.instance.name)
self.cfg.MarkInstanceDown(self.instance.uuid)
result.append(("admin_state", constants.ADMINST_DOWN))
self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())
......@@ -3413,7 +3420,7 @@ class LUInstanceChangeGroup(LogicalUnit):
# Lock all groups used by instance optimistically; this requires going
# via the node before it's locked, requiring verification later on
instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
lock_groups.update(instance_groups)
else:
# No target groups, need to lock all of them
......@@ -3429,7 +3436,7 @@ class LUInstanceChangeGroup(LogicalUnit):
# Lock all nodes in all potential target groups
lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
self.cfg.GetInstanceNodeGroups(self.op.instance_name))
self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
member_nodes = [node_uuid
for group in lock_groups