Commit 43b1f49f authored by Ilias Tsitsimpis, committed by Jose A. Lopes

Lift the Disk objects from the Instances



This patch replaces 'instance.disks' with 'GetInstanceDisks' everywhere
in the codebase. From now on, the disks of an instance must be
retrieved from the config via the 'GetInstanceDisks' function, and disk
objects must be added to or removed from the config via
'AddInstanceDisk' and 'RemoveInstanceDisk'.
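
As a rough sketch of the resulting access pattern (illustrative only;
'cfg' is the ConfigWriter the LUs use as 'self.cfg', while 'process',
'new_disk', 'old_disk' and 'new_size' are placeholders, not code from
this patch):

    # Before: Disk objects were read straight off the Instance.
    for idx, disk in enumerate(instance.disks):
      process(idx, disk)

    # After: Disk objects live in the config; fetch them by instance UUID.
    disks = cfg.GetInstanceDisks(instance.uuid)
    for idx, disk in enumerate(disks):
      process(idx, disk)

    # Adding/removing a disk also goes through the config:
    cfg.AddInstanceDisk(instance.uuid, new_disk, idx=idx)
    cfg.RemoveInstanceDisk(instance.uuid, old_disk.uuid)

    # In-place disk modifications are saved per disk, not per instance:
    disk.size = new_size
    cfg.Update(disk, feedback_fn)

    # After structural changes, re-read the instance for a fresh view:
    instance = cfg.GetInstanceInfo(instance.uuid)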
Signed-off-by: Ilias Tsitsimpis <iliastsi@grnet.gr>
Reviewed-by: Jose A. Lopes <jabolopes@google.com>
parent bca90008
@@ -277,7 +277,7 @@ class LUBackupExport(LogicalUnit):
# instance disk type verification
# TODO: Implement export support for file-based disks
for disk in self.instance.disks:
for disk in self.cfg.GetInstanceDisks(self.instance.uuid):
if disk.dev_type in constants.DTS_FILEBASED:
raise errors.OpPrereqError("Export not supported for instances with"
" file-based disks", errors.ECODE_INVAL)
@@ -316,7 +316,7 @@ class LUBackupExport(LogicalUnit):
self.secondary_nodes = \
self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
self.inst_disks = self.instance.disks
self.inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
def _CleanupExports(self, feedback_fn):
"""Removes exports of current instance from all other nodes.
@@ -402,7 +402,8 @@ class LUBackupExport(LogicalUnit):
@return: Size of the disks in MiB
"""
return sum([d.size for d in self.instance.disks])
inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
return sum([d.size for d in inst_disks])
def ZeroFreeSpace(self, feedback_fn):
"""Zeroes the free space on a shutdown instance.
......
@@ -611,7 +611,7 @@ class LUClusterRepairDiskSizes(NoHooksLU):
pnode = instance.primary_node
if pnode not in per_node_disks:
per_node_disks[pnode] = []
for idx, disk in enumerate(instance.disks):
for idx, disk in enumerate(self.cfg.GetInstanceDisks(instance.uuid)):
per_node_disks[pnode].append((instance, idx, disk))
assert not (frozenset(per_node_disks.keys()) -
@@ -663,7 +663,7 @@ class LUClusterRepairDiskSizes(NoHooksLU):
" correcting: recorded %d, actual %d", idx,
instance.name, disk.size, size)
disk.size = size
self.cfg.Update(instance, feedback_fn)
self.cfg.Update(disk, feedback_fn)
changed.append((instance.name, idx, "size", size))
if es_flags[node_uuid]:
if spindles is None:
@@ -675,10 +675,10 @@ class LUClusterRepairDiskSizes(NoHooksLU):
" correcting: recorded %s, actual %s",
idx, instance.name, disk.spindles, spindles)
disk.spindles = spindles
self.cfg.Update(instance, feedback_fn)
self.cfg.Update(disk, feedback_fn)
changed.append((instance.name, idx, "spindles", disk.spindles))
if self._EnsureChildSizes(disk):
self.cfg.Update(instance, feedback_fn)
self.cfg.Update(disk, feedback_fn)
changed.append((instance.name, idx, "size", disk.size))
return changed
@@ -2405,7 +2405,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
" that have exclusive storage set: %s",
instance.disk_template,
utils.CommaJoin(self.cfg.GetNodeNames(es_nodes)))
for (idx, disk) in enumerate(instance.disks):
for (idx, disk) in enumerate(self.cfg.GetInstanceDisks(instance.uuid)):
self._ErrorIf(disk.spindles is None,
constants.CV_EINSTANCEMISSINGCFGPARAMETER, instance.name,
"number of spindles not configured for disk %s while"
@@ -3083,7 +3083,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
if instanceinfo[uuid].disk_template == diskless)
disks = [(inst_uuid, disk)
for inst_uuid in node_inst_uuids
for disk in instanceinfo[inst_uuid].disks]
for disk in self.cfg.GetInstanceDisks(inst_uuid)]
if not disks:
nodisk_instances.update(uuid for uuid in node_inst_uuids
......
@@ -616,10 +616,11 @@ def ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
cpu_count = be_full[constants.BE_VCPUS]
inst_nodes = cfg.GetInstanceNodes(instance.uuid)
es_flags = rpc.GetExclusiveStorageForNodes(cfg, inst_nodes)
disks = cfg.GetInstanceDisks(instance.uuid)
if any(es_flags.values()):
# With exclusive storage use the actual spindles
try:
spindle_use = sum([disk.spindles for disk in instance.disks])
spindle_use = sum([disk.spindles for disk in disks])
except TypeError:
ret.append("Number of spindles not configured for disks of instance %s"
" while exclusive storage is enabled, try running gnt-cluster"
@@ -628,8 +629,8 @@ def ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
spindle_use = None
else:
spindle_use = be_full[constants.BE_SPINDLE_USE]
disk_count = len(instance.disks)
disk_sizes = [disk.size for disk in instance.disks]
disk_count = len(disks)
disk_sizes = [disk.size for disk in disks]
nic_count = len(instance.nics)
disk_template = instance.disk_template
@@ -1115,8 +1116,9 @@ def CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
def FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_uuid, prereq):
faulty = []
disks = cfg.GetInstanceDisks(instance.uuid)
result = rpc_runner.call_blockdev_getmirrorstatus(
node_uuid, (instance.disks, instance))
node_uuid, (disks, instance))
result.Raise("Failed to get disk status from node %s" %
cfg.GetNodeName(node_uuid),
prereq=prereq, ecode=errors.ECODE_ENVIRON)
......
@@ -915,7 +915,8 @@ class LUGroupVerifyDisks(NoHooksLU):
node_to_inst.setdefault(node_uuid, []).append(inst)
for (node_uuid, insts) in node_to_inst.items():
node_disks = [(inst.disks, inst) for inst in insts]
node_disks = [(self.cfg.GetInstanceDisks(inst.uuid), inst)
for inst in insts]
node_res = self.rpc.call_drbd_needs_activation(node_uuid, node_disks)
msg = node_res.fail_msg
if msg:
@@ -926,7 +927,8 @@ class LUGroupVerifyDisks(NoHooksLU):
faulty_disk_uuids = set(node_res.payload)
for inst in self.instances.values():
inst_disk_uuids = set([disk.uuid for disk in inst.disks])
disks = self.cfg.GetInstanceDisks(inst.uuid)
inst_disk_uuids = set([disk.uuid for disk in disks])
if inst_disk_uuids.intersection(faulty_disk_uuids):
offline_disk_instance_names.add(inst.name)
......
@@ -1365,6 +1365,8 @@ class LUInstanceCreate(LogicalUnit):
if disk_abort:
RemoveDisks(self, instance)
for disk_uuid in instance.disks:
self.cfg.RemoveInstanceDisk(instance.uuid, disk_uuid)
self.cfg.RemoveInstance(instance.uuid)
# Make sure the instance lock gets removed
self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
@@ -1411,7 +1413,7 @@ class LUInstanceCreate(LogicalUnit):
uuid=instance_uuid,
os=os_type,
primary_node=self.pnode.uuid,
nics=self.nics, disks=disks,
nics=self.nics, disks=[],
disk_template=self.op.disk_template,
disks_active=False,
admin_state=constants.ADMINST_DOWN,
@@ -1442,16 +1444,22 @@ class LUInstanceCreate(LogicalUnit):
else:
feedback_fn("* creating instance disks...")
try:
CreateDisks(self, iobj, instance_disks=iobj.disks)
CreateDisks(self, iobj, instance_disks=disks)
except errors.OpExecError:
self.LogWarning("Device creation failed")
self.cfg.ReleaseDRBDMinors(instance_uuid)
raise
feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
self.cfg.AddInstance(iobj, self.proc.GetECId())
feedback_fn("adding disks to cluster config")
for disk in disks:
self.cfg.AddInstanceDisk(iobj.uuid, disk)
# re-read the instance from the configuration
iobj = self.cfg.GetInstanceInfo(iobj.uuid)
# Declare that we don't want to remove the instance lock anymore, as we've
# added the instance to the config
del self.remove_locks[locking.LEVEL_INSTANCE]
@@ -1498,6 +1506,7 @@ class LUInstanceCreate(LogicalUnit):
ReleaseLocks(self, locking.LEVEL_NODE_RES)
if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
disks = self.cfg.GetInstanceDisks(iobj.uuid)
if self.op.mode == constants.INSTANCE_CREATE:
os_image = objects.GetOSImage(self.op.osparams)
@@ -1507,8 +1516,8 @@ class LUInstanceCreate(LogicalUnit):
if pause_sync:
feedback_fn("* pausing disk sync to install instance OS")
result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
(iobj.disks,
iobj), True)
(disks, iobj),
True)
for idx, success in enumerate(result.payload):
if not success:
logging.warn("pause-sync of instance %s for disk %d failed",
@@ -1524,8 +1533,8 @@ class LUInstanceCreate(LogicalUnit):
if pause_sync:
feedback_fn("* resuming disk sync")
result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
(iobj.disks,
iobj), False)
(disks, iobj),
False)
for idx, success in enumerate(result.payload):
if not success:
logging.warn("resume-sync of instance %s for disk %d failed",
@@ -1547,10 +1556,10 @@ class LUInstanceCreate(LogicalUnit):
if iobj.os:
dst_io = constants.IEIO_SCRIPT
dst_ioargs = ((iobj.disks[idx], iobj), idx)
dst_ioargs = ((disks[idx], iobj), idx)
else:
dst_io = constants.IEIO_RAW_DISK
dst_ioargs = (iobj.disks[idx], iobj)
dst_ioargs = (disks[idx], iobj)
# FIXME: pass debug option from opcode to backend
dt = masterd.instance.DiskTransfer("disk/%s" % idx,
@@ -1717,8 +1726,8 @@ class LUInstanceRename(LogicalUnit):
if (self.instance.disk_template in (constants.DT_FILE,
constants.DT_SHARED_FILE) and
self.op.new_name != self.instance.name):
old_file_storage_dir = os.path.dirname(
self.instance.disks[0].logical_id[1])
disks = self.cfg.GetInstanceDisks(self.instance.uuid)
old_file_storage_dir = os.path.dirname(disks[0].logical_id[1])
rename_file_storage = True
self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
@@ -1729,10 +1738,10 @@ class LUInstanceRename(LogicalUnit):
# re-read the instance from the configuration after rename
renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
disks = self.cfg.GetInstanceDisks(renamed_inst.uuid)
if rename_file_storage:
new_file_storage_dir = os.path.dirname(
renamed_inst.disks[0].logical_id[1])
new_file_storage_dir = os.path.dirname(disks[0].logical_id[1])
result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
old_file_storage_dir,
new_file_storage_dir)
@@ -1746,7 +1755,7 @@ class LUInstanceRename(LogicalUnit):
# update info on disks
info = GetInstanceInfoText(renamed_inst)
for (idx, disk) in enumerate(renamed_inst.disks):
for (idx, disk) in enumerate(disks):
for node_uuid in self.cfg.GetInstanceNodes(renamed_inst.uuid):
result = self.rpc.call_blockdev_setinfo(node_uuid,
(disk, renamed_inst), info)
@@ -1820,7 +1829,7 @@ class LUInstanceRemove(LogicalUnit):
"Cannot retrieve locked instance %s" % self.op.instance_name
self.secondary_nodes = \
self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
self.inst_disks = self.instance.disks
self.inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
def Exec(self, feedback_fn):
"""Remove the instance.
@@ -1926,7 +1935,8 @@ class LUInstanceMove(LogicalUnit):
cluster = self.cfg.GetClusterInfo()
bep = cluster.FillBE(self.instance)
for idx, dsk in enumerate(self.instance.disks):
disks = self.cfg.GetInstanceDisks(self.instance.uuid)
for idx, dsk in enumerate(disks):
if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
constants.DT_SHARED_FILE, constants.DT_GLUSTER):
raise errors.OpPrereqError("Instance disk %d has a complex layout,"
@@ -1993,7 +2003,8 @@ class LUInstanceMove(LogicalUnit):
errs = []
transfers = []
# activate, get path, create transfer jobs
for idx, disk in enumerate(self.instance.disks):
disks = self.cfg.GetInstanceDisks(self.instance.uuid)
for idx, disk in enumerate(disks):
# FIXME: pass debug option from opcode to backend
dt = masterd.instance.DiskTransfer("disk/%s" % idx,
constants.IEIO_RAW_DISK,
@@ -2002,6 +2013,7 @@ class LUInstanceMove(LogicalUnit):
(disk, self.instance),
None)
transfers.append(dt)
self.cfg.Update(disk, feedback_fn)
import_result = \
masterd.instance.TransferInstanceData(self, feedback_fn,
@@ -2418,16 +2430,6 @@ def _ApplyContainerMods(kind, container, chgdesc, mods,
chgdesc.extend(changes)
def _UpdateIvNames(base_index, disks):
"""Updates the C{iv_name} attribute of disks.
@type disks: list of L{objects.Disk}
"""
for (idx, disk) in enumerate(disks):
disk.iv_name = "disk/%s" % (base_index + idx, )
class LUInstanceSetParams(LogicalUnit):
"""Modifies an instances's parameters.
@@ -2858,7 +2860,7 @@ class LUInstanceSetParams(LogicalUnit):
assert self.instance.disk_template == constants.DT_PLAIN
disks = [{constants.IDISK_SIZE: d.size,
constants.IDISK_VG: d.logical_id[0]}
for d in self.instance.disks]
for d in self.cfg.GetInstanceDisks(self.instance.uuid)]
required = ComputeDiskSizePerVG(self.op.disk_template, disks)
CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)
@@ -2955,7 +2957,8 @@ class LUInstanceSetParams(LogicalUnit):
disk.name = params.get(constants.IDISK_NAME, None)
# Verify disk changes (operating on a copy)
disks = copy.deepcopy(self.instance.disks)
inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
disks = copy.deepcopy(inst_disks)
_ApplyContainerMods("disk", disks, None, self.diskmod, None,
_PrepareDiskMod, None)
utils.ValidateDeviceNames("disk", disks)
@@ -2963,7 +2966,7 @@ class LUInstanceSetParams(LogicalUnit):
raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
" more" % constants.MAX_DISKS,
errors.ECODE_STATE)
disk_sizes = [disk.size for disk in self.instance.disks]
disk_sizes = [disk.size for disk in inst_disks]
disk_sizes.extend(params["size"] for (op, idx, params, private) in
self.diskmod if op == constants.DDM_ADD)
ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
@@ -3389,11 +3392,12 @@ class LUInstanceSetParams(LogicalUnit):
assert self.instance.disk_template == constants.DT_PLAIN
old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
# create a fake disk info for _GenerateDiskTemplate
disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
constants.IDISK_VG: d.logical_id[0],
constants.IDISK_NAME: d.name}
for d in self.instance.disks]
for d in old_disks]
new_disks = GenerateDiskTemplate(self, self.op.disk_template,
self.instance.uuid, pnode_uuid,
[snode_uuid], disk_info, None, None, 0,
@@ -3415,7 +3419,7 @@ class LUInstanceSetParams(LogicalUnit):
# old ones
feedback_fn("Renaming original volumes...")
rename_list = [(o, n.children[0].logical_id)
for (o, n) in zip(self.instance.disks, new_disks)]
for (o, n) in zip(old_disks, new_disks)]
result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
result.Raise("Failed to rename original LVs")
@@ -3432,16 +3436,27 @@ class LUInstanceSetParams(LogicalUnit):
feedback_fn("Initializing of DRBD devices failed;"
" renaming back original volumes...")
rename_back_list = [(n.children[0], o.logical_id)
for (n, o) in zip(new_disks, self.instance.disks)]
for (n, o) in zip(new_disks, old_disks)]
result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
result.Raise("Failed to rename LVs back after error %s" % str(e))
raise
# at this point, the instance has been modified
# Remove the old disks from the instance
for old_disk in old_disks:
self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)
# Update instance structure
self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
self.instance.disk_template = constants.DT_DRBD8
self.instance.disks = new_disks
self.cfg.Update(self.instance, feedback_fn)
# Attach the new disks to the instance
for (idx, new_disk) in enumerate(new_disks):
self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)
# re-read the instance from the configuration
self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
# Release node locks while waiting for sync
ReleaseLocks(self, locking.LEVEL_NODE)
@@ -3466,8 +3481,9 @@ class LUInstanceSetParams(LogicalUnit):
snode_uuid = secondary_nodes[0]
feedback_fn("Converting template to plain")
old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
new_disks = [d.children[0] for d in self.instance.disks]
disks = self.cfg.GetInstanceDisks(self.instance.uuid)
old_disks = AnnotateDiskParams(self.instance, disks, self.cfg)
new_disks = [d.children[0] for d in disks]
# copy over size, mode and name
for parent, child in zip(old_disks, new_disks):
@@ -3481,12 +3497,22 @@ class LUInstanceSetParams(LogicalUnit):
tcp_port = disk.logical_id[2]
self.cfg.AddTcpUdpPort(tcp_port)
# update instance structure
self.instance.disks = new_disks
# Remove the old disks from the instance
for old_disk in old_disks:
self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)
# Update instance structure
self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
self.instance.disk_template = constants.DT_PLAIN
_UpdateIvNames(0, self.instance.disks)
self.cfg.Update(self.instance, feedback_fn)
# Attach the new disks to the instance
for (idx, new_disk) in enumerate(new_disks):
self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)
# re-read the instance from the configuration
self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
# Release locks in case removing disks takes a while
ReleaseLocks(self, locking.LEVEL_NODE)
@@ -3528,8 +3554,9 @@ class LUInstanceSetParams(LogicalUnit):
"""
# add a new disk
instance_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
if self.instance.disk_template in constants.DTS_FILEBASED:
(file_driver, file_path) = self.instance.disks[0].logical_id
(file_driver, file_path) = instance_disks[0].logical_id
file_path = os.path.dirname(file_path)
else:
file_driver = file_path = None
@@ -3542,6 +3569,10 @@ class LUInstanceSetParams(LogicalUnit):
file_driver, idx, self.Log, self.diskparams)[0]
new_disks = CreateDisks(self, self.instance, disks=[disk])
self.cfg.AddInstanceDisk(self.instance.uuid, disk, idx)
# re-read the instance from the configuration
self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
if self.cluster.prealloc_wipe_disks:
# Wipe new disk
@@ -3608,6 +3639,9 @@ class LUInstanceSetParams(LogicalUnit):
disk.params[key] = value
changes.append(("disk.params:%s/%d" % (key, idx), value))
# Update disk object
self.cfg.Update(disk, self.feedback_fn)
return changes
def _RemoveDisk(self, idx, root, _):
@@ -3635,6 +3669,12 @@ class LUInstanceSetParams(LogicalUnit):
if root.dev_type in constants.DTS_DRBD:
self.cfg.AddTcpUdpPort(root.logical_id[2])
# Remove disk from config
self.cfg.RemoveInstanceDisk(self.instance.uuid, root.uuid)
# re-read the instance from the configuration
self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
return hotmsg
def _CreateNewNic(self, idx, params, private):
@@ -3710,6 +3750,7 @@ class LUInstanceSetParams(LogicalUnit):
All parameters take effect only at the next restart of the instance.
"""
self.feedback_fn = feedback_fn
# Process here the warnings from CheckPrereq, as we don't have a
# feedback_fn there.
# TODO: Replace with self.LogWarning
@@ -3735,10 +3776,10 @@ class LUInstanceSetParams(LogicalUnit):
result.append(("runtime_memory", self.op.runtime_mem))
# Apply disk changes
_ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
_ApplyContainerMods("disk", inst_disks, result, self.diskmod,
self._CreateNewDisk, self._ModifyDisk,
self._RemoveDisk, post_add_fn=self._PostAddDisk)
_UpdateIvNames(0, self.instance.disks)
if self.op.disk_template:
if __debug__:
......
@@ -484,11 +484,11 @@ class TLMigrateInstance(Tasklet):
"""
self.feedback_fn("* wait until resync is done")
all_done = False
disks = self.cfg.GetInstanceDisks(self.instance.uuid)
while not all_done:
all_done = True
result = self.rpc.call_drbd_wait_sync(self.all_node_uuids,
(self.instance.disks,
self.instance))
(disks, self.instance))
min_percent = 100
for node_uuid, nres in result.items():
nres.Raise("Cannot resync disks on node %s" %
@@ -509,8 +509,9 @@ class TLMigrateInstance(Tasklet):
self.feedback_fn("* switching node %s to secondary mode" %
self.cfg.GetNodeName(node_uuid))
disks = self.cfg.GetInstanceDisks(self.instance.uuid)
result = self.rpc.call_blockdev_close(node_uuid, self.instance.name,
(self.instance.disks, self.instance))
(disks, self.instance))
result.Raise("Cannot change disk to secondary on node %s" %
self.cfg.GetNodeName(node_uuid))
@@ -519,8 +520,9 @@ class TLMigrateInstance(Tasklet):
"""
self.feedback_fn("* changing into standalone mode")
disks = self.cfg.GetInstanceDisks(self.instance.uuid)
result = self.rpc.call_drbd_disconnect_net(
self.all_node_uuids, (self.instance.disks, self.instance))
self.all_node_uuids, (disks, self.instance))
for node_uuid, nres in result.items():
nres.Raise("Cannot disconnect disks node %s" %
self.cfg.GetNodeName(node_uuid))
@@ -534,8 +536,9 @@ class TLMigrateInstance(Tasklet):
else:
msg = "single-master"
self.feedback_fn("* changing disks into %s mode" % msg)
disks = self.cfg.GetInstanceDisks(self.instance.uuid)
result = self.rpc.call_drbd_attach_net(self.all_node_uuids,
(self.instance.disks, self.instance),
(disks, self.instance),
self.instance.name, multimaster)
for node_uuid, nres in result.items():
nres.Raise("Cannot change disks config on node %s" %
@@ -681,7 +684,7 @@ class TLMigrateInstance(Tasklet):
(src_version, dst_version))
self.feedback_fn("* checking disk consistency between source and target")
for (idx, dev) in enumerate(self.instance.disks):
for (idx, dev) in enumerate(self.cfg.GetInstanceDisks(self.instance.uuid)):
if not CheckDiskConsistency(self.lu, self.instance, dev,
self.target_node_uuid,
False):
@@ -816,7 +819,8 @@ class TLMigrateInstance(Tasklet):
# If the instance's disk template is `rbd' or `ext' and there was a
# successful migration, unmap the device from the source node.
if self.instance.disk_template in (constants.DT_RBD, constants.DT_EXT):
disks = ExpandCheckDisks(self.instance, self.instance.disks)
inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
disks = ExpandCheckDisks(inst_disks, inst_disks)
self.feedback_fn("* unmapping instance's disks from %s" %
self.cfg.GetNodeName(self.source_node_uuid))
for disk in disks:
@@ -847,7 +851,8 @@ class TLMigrateInstance(Tasklet):
if self.instance.disks_active:
self.feedback_fn("* checking disk consistency between source and target")
for (idx, dev) in enumerate(self.instance.disks):
inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
for (idx, dev) in enumerate(inst_disks):
# for drbd, these are drbd over lvm
if not CheckDiskConsistency(self.lu, self.instance, dev,
self.target_node_uuid, False):
......
@@ -247,7 +247,7 @@ class LUInstanceQueryData(NoHooksLU):
disks = map(compat.partial(self._ComputeDiskStatus, instance,
node_uuid2name_fn),
instance.disks)
self.cfg.GetInstanceDisks(instance.uuid))
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance.uuid)
snodes_group_uuids = [nodes[snode_uuid].group
......
@@ -233,7 +233,7 @@ def CreateDisks(lu, instance, instance_disks=None,
"""
info = GetInstanceInfoText(instance)
if instance_disks is None:
instance_disks = instance.disks
instance_disks = lu.cfg.GetInstanceDisks(instance.uuid)
if target_node_uuid is None:
pnode_uuid = instance.primary_node
# We cannot use config's 'GetInstanceNodes' here as 'CreateDisks'
@@ -613,7 +613,7 @@ class LUInstanceRecreateDisks(LogicalUnit):
constants.IDISK_SIZE: d.size,
constants.IDISK_MODE: d.mode,
constants.IDISK_SPINDLES: d.spindles,
} for d in self.instance.disks]
} for d in self.cfg.GetInstanceDisks(self.instance.uuid)]
req = iallocator.IAReqInstanceAlloc(name=self.op.instance_name,
disk_template=disk_template,
tags=list(self.instance.GetTags()),
@@ -826,7 +826,8 @@ class LUInstanceRecreateDisks(LogicalUnit):
to_skip = []
mods = [] # keeps track of needed changes
for idx, disk in enumerate(self.instance.disks):
inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
for idx, disk in enumerate(inst_disks):
try:
changes = self.disks[idx]
except KeyError:
@@ -854,7 +855,7 @@ class LUInstanceRecreateDisks(LogicalUnit):
# now that we have passed all asserts above, we can apply the mods
# in a single run (to avoid partial changes)
for idx, new_id, changes in mods:
disk = self.instance.disks[idx]
disk = inst_disks[idx]
if new_id is not None:
assert disk.dev_type == constants.DT_DRBD8
disk.logical_id = new_id
@@ -862,6 +863,7 @@
disk