Commit a57e502a authored by Thomas Thrainer

Remove physical_id field from disk object



The 'physical_id' field of disk objects is no longer used, so remove it.
All references are removed as well, together with the code which made
sure that the physical_id was kept up to date whenever a disk was
transmitted over RPC.
Signed-off-by: Thomas Thrainer <thomasth@google.com>
Reviewed-by: Jose A. Lopes <jabolopes@google.com>
parent 0c3d9c7c
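
For context on the diff below: before this change, callers had to invoke cfg.SetDiskID(disk, node_uuid) before every RPC that shipped a disk object, so that a node-specific physical_id could be derived from the node-independent logical_id. A minimal sketch of the DRBD branch of the removed derivation, condensed from the ConfigWriter._UnlockedSetDiskID hunk further down (NodeInfo and the nodes dict are simplified stand-ins for the real Ganeti objects):

```python
# Sketch of the removed physical_id derivation for DRBD disks.
# NodeInfo stands in for Ganeti's node object; only secondary_ip is modelled.
from typing import NamedTuple

class NodeInfo(NamedTuple):
    secondary_ip: str

def derive_drbd_physical_id(logical_id, node_uuid, nodes):
    # logical_id is (pnode, snode, port, pminor, sminor, secret)
    pnode, snode, port, pminor, sminor, secret = logical_id
    if node_uuid not in (pnode, snode):
        raise ValueError("DRBD device does not know node %s" % node_uuid)
    p_data = (nodes[pnode].secondary_ip, port)  # (ip, port) of the primary
    s_data = (nodes[snode].secondary_ip, port)  # (ip, port) of the secondary
    if node_uuid == pnode:
        return p_data + s_data + (pminor, secret)
    return s_data + p_data + (sminor, secret)  # local node's data comes first

nodes = {"n1": NodeInfo("192.0.2.1"), "n2": NodeInfo("192.0.2.2")}
print(derive_drbd_physical_id(("n1", "n2", 11000, 0, 1, "secret"), "n1", nodes))
# -> ('192.0.2.1', 11000, '192.0.2.2', 11000, 0, 'secret')
```

For every non-DRBD disk type the removed code simply copied logical_id into physical_id, so the field carried no information of its own; the node-dependent DRBD data is presumably handled via the non-persisted dynamic_params slot that the objects.py hunk below mentions.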
@@ -2851,7 +2851,7 @@ def OSEnvironment(instance, inst_os, debug=0):
result["DISK_%d_BACKEND_TYPE" % idx] = "block"
elif disk.dev_type in [constants.DT_FILE, constants.DT_SHARED_FILE]:
result["DISK_%d_BACKEND_TYPE" % idx] = \
- "file:%s" % disk.physical_id[0]
+ "file:%s" % disk.logical_id[0]
# NICs
for idx, nic in enumerate(instance.nics):
@@ -3076,7 +3076,7 @@ def FinalizeExport(instance, snap_disks):
config.set(constants.INISECT_INS, "disk%d_ivname" % disk_count,
("%s" % disk.iv_name))
config.set(constants.INISECT_INS, "disk%d_dump" % disk_count,
- ("%s" % disk.physical_id[1]))
+ ("%s" % disk.logical_id[1]))
config.set(constants.INISECT_INS, "disk%d_size" % disk_count,
("%d" % disk.size))
@@ -3159,11 +3159,9 @@ def BlockdevRename(devlist):
"""Rename a list of block devices.
@type devlist: list of tuples
- @param devlist: list of tuples of the form (disk,
-   new_logical_id, new_physical_id); disk is an
-   L{objects.Disk} object describing the current disk,
-   and new logical_id/physical_id is the name we
-   rename it to
+ @param devlist: list of tuples of the form (disk, new_unique_id); disk is
+   an L{objects.Disk} object describing the current disk, and new
+   unique_id is the name we rename it to
@rtype: boolean
@return: True if all renames succeeded, False otherwise
@@ -942,10 +942,6 @@ def _FormatDiskDetails(dev_type, dev, roman):
return data
- def _FormatListInfo(data):
-   return list(str(i) for i in data)
def _FormatBlockDevInfo(idx, top_level, dev, roman):
"""Show block device information.
@@ -1043,8 +1039,6 @@ def _FormatBlockDevInfo(idx, top_level, dev, roman):
data.append(("logical_id", l_id[0]))
else:
data.extend(l_id)
- elif dev["physical_id"] is not None:
-   data.append(("physical_id:", _FormatListInfo(dev["physical_id"])))
if dev["pstatus"]:
data.append(("on primary", helper(dev["dev_type"], dev["pstatus"])))
@@ -393,11 +393,6 @@ class LUBackupExport(LogicalUnit):
" node %s" % (self.instance.name,
self.cfg.GetNodeName(src_node_uuid)))
- # set the disks ID correctly since call_instance_start needs the
- # correct drbd minor to create the symlinks
- for disk in self.instance.disks:
-   self.cfg.SetDiskID(disk, src_node_uuid)
activate_disks = not self.instance.disks_active
if activate_disks:
@@ -538,8 +538,6 @@ class LUClusterRepairDiskSizes(NoHooksLU):
continue
newl = [(v[2].Copy(), v[0]) for v in dskl]
- for (dsk, _) in newl:
-   self.cfg.SetDiskID(dsk, node_uuid)
node_name = self.cfg.GetNodeName(node_uuid)
result = self.rpc.call_blockdev_getdimensions(node_uuid, newl)
if result.fail_msg:
@@ -2695,7 +2693,6 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
for (inst_uuid, dev) in disks:
(anno_disk,) = AnnotateDiskParams(instanceinfo[inst_uuid], [dev],
self.cfg)
- self.cfg.SetDiskID(anno_disk, nuuid)
dev_inst_only.append((anno_disk, instanceinfo[inst_uuid]))
node_disks_dev_inst_only[nuuid] = dev_inst_only
@@ -1036,9 +1036,6 @@ def CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
def FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_uuid, prereq):
faulty = []
- for dev in instance.disks:
-   cfg.SetDiskID(dev, node_uuid)
result = rpc_runner.call_blockdev_getmirrorstatus(
node_uuid, (instance.disks, instance))
result.Raise("Failed to get disk status from node %s" %
@@ -1272,7 +1272,6 @@ class LUInstanceCreate(LogicalUnit):
for t_dsk, a_dsk in zip(tmp_disks, self.disks):
rename_to.append(t_dsk.logical_id)
t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
- self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
result = self.rpc.call_blockdev_rename(self.pnode.uuid,
zip(tmp_disks, rename_to))
result.Raise("Failed to rename adoped LVs")
@@ -1337,11 +1336,6 @@
ReleaseLocks(self, locking.LEVEL_NODE_RES)
if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
- # we need to set the disks ID to the primary node, since the
- # preceding code might or might have not done it, depending on
- # disk template and other options
- for disk in iobj.disks:
-   self.cfg.SetDiskID(disk, self.pnode.uuid)
if self.op.mode == constants.INSTANCE_CREATE:
if not self.op.no_install:
pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
@@ -1573,7 +1567,6 @@ class LUInstanceRename(LogicalUnit):
info = GetInstanceInfoText(renamed_inst)
for (idx, disk) in enumerate(renamed_inst.disks):
for node_uuid in renamed_inst.all_nodes:
- self.cfg.SetDiskID(disk, node_uuid)
result = self.rpc.call_blockdev_setinfo(node_uuid,
(disk, renamed_inst), info)
result.Warn("Error setting info on node %s for disk %s" %
@@ -3132,8 +3125,6 @@ class LUInstanceSetParams(LogicalUnit):
except errors.GenericError, e:
feedback_fn("Initializing of DRBD devices failed;"
" renaming back original volumes...")
- for disk in new_disks:
-   self.cfg.SetDiskID(disk, pnode_uuid)
rename_back_list = [(n.children[0], o.logical_id)
for (n, o) in zip(new_disks, self.instance.disks)]
result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
@@ -3194,7 +3185,6 @@
feedback_fn("Removing volumes on the secondary node...")
for disk in old_disks:
- self.cfg.SetDiskID(disk, snode_uuid)
result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
result.Warn("Could not remove block device %s on node %s,"
" continuing anyway" %
@@ -3204,7 +3194,6 @@
feedback_fn("Removing unneeded volumes on the primary node...")
for idx, disk in enumerate(old_disks):
meta = disk.children[1]
- self.cfg.SetDiskID(meta, pnode_uuid)
result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
result.Warn("Could not remove metadata for disk %d on node %s,"
" continuing anyway" %
@@ -3264,7 +3253,6 @@
(anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
for node_uuid, disk in anno_disk.ComputeNodeTree(
self.instance.primary_node):
- self.cfg.SetDiskID(disk, node_uuid)
msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
.fail_msg
if msg:
@@ -502,9 +502,6 @@ class TLMigrateInstance(Tasklet):
self.feedback_fn("* switching node %s to secondary mode" %
self.cfg.GetNodeName(node_uuid))
- for dev in self.instance.disks:
-   self.cfg.SetDiskID(dev, node_uuid)
result = self.rpc.call_blockdev_close(node_uuid, self.instance.name,
(self.instance.disks, self.instance))
result.Raise("Cannot change disk to secondary on node %s" %
@@ -394,8 +394,6 @@ class LUInstanceReboot(LogicalUnit):
if instance_running and \
self.op.reboot_type in [constants.INSTANCE_REBOOT_SOFT,
constants.INSTANCE_REBOOT_HARD]:
- for disk in self.instance.disks:
-   self.cfg.SetDiskID(disk, current_node_uuid)
result = self.rpc.call_instance_reboot(current_node_uuid, self.instance,
self.op.reboot_type,
self.op.shutdown_timeout,
@@ -308,7 +308,6 @@ class LUInstanceQueryData(NoHooksLU):
if self.op.static or not node_uuid:
return None
- self.cfg.SetDiskID(dev, node_uuid)
result = self.rpc.call_blockdev_find(node_uuid, (dev, instance))
if result.offline:
return None
@@ -372,7 +371,6 @@
"dev_type": dev.dev_type,
"logical_id": dev.logical_id,
"drbd_info": drbd_info,
- "physical_id": dev.physical_id,
"pstatus": dev_pstatus,
"sstatus": dev_sstatus,
"children": dev_children,
@@ -79,7 +79,6 @@ def CreateSingleBlockDev(lu, node_uuid, instance, device, info, force_open,
@param excl_stor: Whether exclusive_storage is active for the node
"""
- lu.cfg.SetDiskID(device, node_uuid)
result = lu.rpc.call_blockdev_create(node_uuid, (device, instance),
device.size, instance.name, force_open,
info, excl_stor)
@@ -87,8 +86,6 @@ def CreateSingleBlockDev(lu, node_uuid, instance, device, info, force_open,
" node %s for instance %s" % (device,
lu.cfg.GetNodeName(node_uuid),
instance.name))
- if device.physical_id is None:
-   device.physical_id = result.payload
def _CreateBlockDevInner(lu, node_uuid, instance, device, force_create,
@@ -198,7 +195,6 @@ def _UndoCreateDisks(lu, disks_created, instance):
"""
for (node_uuid, disk) in disks_created:
- lu.cfg.SetDiskID(disk, node_uuid)
result = lu.rpc.call_blockdev_remove(node_uuid, (disk, instance))
result.Warn("Failed to remove newly-created disk %s on node %s" %
(disk, lu.cfg.GetNodeName(node_uuid)), logging.warning)
@@ -1016,9 +1012,6 @@ def WipeDisks(lu, instance, disks=None):
disks = [(idx, disk, 0)
for (idx, disk) in enumerate(instance.disks)]
- for (_, device, _) in disks:
-   lu.cfg.SetDiskID(device, node_uuid)
logging.info("Pausing synchronization of disks of instance '%s'",
instance.name)
result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
@@ -1150,9 +1143,6 @@ def WaitForSync(lu, instance, disks=None, oneshot=False):
node_uuid = instance.primary_node
node_name = lu.cfg.GetNodeName(node_uuid)
- for dev in disks:
-   lu.cfg.SetDiskID(dev, node_uuid)
# TODO: Convert to utils.Retry
retries = 0
@@ -1227,7 +1217,6 @@ def ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
for disk in disks:
for node_uuid, top_disk in disk.ComputeNodeTree(instance.primary_node):
- lu.cfg.SetDiskID(top_disk, node_uuid)
result = lu.rpc.call_blockdev_shutdown(node_uuid, (top_disk, instance))
msg = result.fail_msg
if msg:
@@ -1298,7 +1287,6 @@ def AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
if ignore_size:
node_disk = node_disk.Copy()
node_disk.UnsetSize()
- lu.cfg.SetDiskID(node_disk, node_uuid)
result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
instance.name, False, idx)
msg = result.fail_msg
@@ -1324,7 +1312,6 @@
if ignore_size:
node_disk = node_disk.Copy()
node_disk.UnsetSize()
- lu.cfg.SetDiskID(node_disk, node_uuid)
result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
instance.name, True, idx)
msg = result.fail_msg
@@ -1339,12 +1326,6 @@
device_info.append((lu.cfg.GetNodeName(instance.primary_node),
inst_disk.iv_name, dev_path))
- # leave the disks configured for the primary node
- # this is a workaround that would be fixed better by
- # improving the logical/physical id handling
- for disk in disks:
-   lu.cfg.SetDiskID(disk, instance.primary_node)
if not disks_ok:
lu.cfg.MarkInstanceDisksInactive(instance.uuid)
@@ -1481,7 +1462,6 @@ class LUInstanceGrowDisk(LogicalUnit):
# First run all grow ops in dry-run mode
for node_uuid in self.instance.all_nodes:
- self.cfg.SetDiskID(self.disk, node_uuid)
result = self.rpc.call_blockdev_grow(node_uuid,
(self.disk, self.instance),
self.delta, True, True,
@@ -1491,7 +1471,6 @@
if wipe_disks:
# Get disk size from primary node for wiping
- self.cfg.SetDiskID(self.disk, self.instance.primary_node)
result = self.rpc.call_blockdev_getdimensions(
self.instance.primary_node, ([self.disk], self.instance))
result.Raise("Failed to retrieve disk size from node '%s'" %
@@ -1515,7 +1494,6 @@
# We know that (as far as we can test) operations across different
# nodes will succeed, time to run it for real on the backing storage
for node_uuid in self.instance.all_nodes:
- self.cfg.SetDiskID(self.disk, node_uuid)
result = self.rpc.call_blockdev_grow(node_uuid,
(self.disk, self.instance),
self.delta, False, True,
@@ -1525,7 +1503,6 @@
# And now execute it for logical storage, on the primary node
node_uuid = self.instance.primary_node
- self.cfg.SetDiskID(self.disk, node_uuid)
result = self.rpc.call_blockdev_grow(node_uuid, (self.disk, self.instance),
self.delta, False, False,
self.node_es_flags[node_uuid])
@@ -1798,8 +1775,6 @@ def _CheckDiskConsistencyInner(lu, instance, dev, node_uuid, on_primary,
the device(s)) to the ldisk (representing the local storage status).
"""
- lu.cfg.SetDiskID(dev, node_uuid)
result = True
if on_primary or dev.AssembleOnSecondary():
@@ -1943,7 +1918,6 @@ class TLReplaceDisks(Tasklet):
for node_uuid in node_uuids:
self.lu.LogInfo("Checking disk/%d on %s", idx,
self.cfg.GetNodeName(node_uuid))
- self.cfg.SetDiskID(dev, node_uuid)
result = _BlockdevFind(self, node_uuid, dev, instance)
@@ -2203,7 +2177,6 @@
for node_uuid in node_uuids:
self.lu.LogInfo("Checking disk/%d on %s", idx,
self.cfg.GetNodeName(node_uuid))
- self.cfg.SetDiskID(dev, node_uuid)
result = _BlockdevFind(self, node_uuid, dev, self.instance)
@@ -2246,8 +2219,6 @@
self.lu.LogInfo("Adding storage on %s for disk/%d",
self.cfg.GetNodeName(node_uuid), idx)
- self.cfg.SetDiskID(dev, node_uuid)
lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
names = _GenerateUniqueNames(self.lu, lv_names)
@@ -2280,8 +2251,6 @@
def _CheckDevices(self, node_uuid, iv_names):
for name, (dev, _, _) in iv_names.iteritems():
- self.cfg.SetDiskID(dev, node_uuid)
result = _BlockdevFind(self, node_uuid, dev, self.instance)
msg = result.fail_msg
@@ -2299,7 +2268,6 @@
self.lu.LogInfo("Remove logical volumes for %s", name)
for lv in old_lvs:
- self.cfg.SetDiskID(lv, node_uuid)
msg = self.rpc.call_blockdev_remove(node_uuid, (lv, self.instance)) \
.fail_msg
if msg:
@@ -2362,13 +2330,12 @@
# ok, we created the new LVs, so now we know we have the needed
# storage; as such, we proceed on the target node to rename
# old_lv to _old, and new_lv to old_lv; note that we rename LVs
- # using the assumption that logical_id == physical_id (which in
- # turn is the unique_id on that node)
+ # using the assumption that logical_id == unique_id on that node
# FIXME(iustin): use a better name for the replaced LVs
temp_suffix = int(time.time())
- ren_fn = lambda d, suff: (d.physical_id[0],
-   d.physical_id[1] + "_replaced-%s" % suff)
+ ren_fn = lambda d, suff: (d.logical_id[0],
+   d.logical_id[1] + "_replaced-%s" % suff)
# Build the rename list based on what LVs exist on the node
rename_old_to_new = []
@@ -2387,7 +2354,7 @@
# Now we rename the new LVs to the old LVs
self.lu.LogInfo("Renaming the new LVs on the target node")
- rename_new_to_old = [(new, old.physical_id)
+ rename_new_to_old = [(new, old.logical_id)
for old, new in zip(old_lvs, new_lvs)]
result = self.rpc.call_blockdev_rename(self.target_node_uuid,
rename_new_to_old)
@@ -2553,7 +2520,6 @@
# We have new devices, shutdown the drbd on the old secondary
for idx, dev in enumerate(self.instance.disks):
self.lu.LogInfo("Shutting down drbd for disk/%d on old node", idx)
- self.cfg.SetDiskID(dev, self.target_node_uuid)
msg = self.rpc.call_blockdev_shutdown(self.target_node_uuid,
(dev, self.instance)).fail_msg
if msg:
@@ -2578,7 +2544,6 @@
self.lu.LogInfo("Updating instance configuration")
for dev, _, new_logical_id in iv_names.itervalues():
dev.logical_id = new_logical_id
- self.cfg.SetDiskID(dev, self.instance.primary_node)
self.cfg.Update(self.instance, feedback_fn)
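The TLReplaceDisks hunks above lean on the assumption, now stated directly in the comment, that for plain LVs the logical_id (a (vg, lv) pair) is already the unique_id the node uses, so rename targets can be computed from it. A small illustration of the renaming scheme, with ren_fn adapted to take the (vg, lv) tuple directly and all names invented:

```python
import time

# Adapted from the ren_fn lambda in the hunk above: keep the volume group,
# suffix the LV name so the old volume gets out of the way.
def ren_fn(logical_id, suff):
    vg, lv = logical_id
    return (vg, lv + "_replaced-%s" % suff)

temp_suffix = int(time.time())
old_lv = ("xenvg", "4fc0-disk0_data")  # hypothetical (vg, lv) pair
print(ren_fn(old_lv, temp_suffix))
# e.g. ('xenvg', '4fc0-disk0_data_replaced-1700000000')
```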
@@ -274,7 +274,6 @@ def RemoveDisks(lu, instance, target_node_uuid=None, ignore_failures=False):
else:
edata = device.ComputeNodeTree(instance.primary_node)
for node_uuid, disk in edata:
- lu.cfg.SetDiskID(disk, node_uuid)
result = lu.rpc.call_blockdev_remove(node_uuid, (disk, instance))
if result.fail_msg:
lu.LogWarning("Could not remove disk %s on node %s,"
@@ -553,15 +553,13 @@ class ConfigWriter(object):
return result
- def _CheckDiskIDs(self, disk, l_ids, p_ids):
+ def _CheckDiskIDs(self, disk, l_ids):
"""Compute duplicate disk IDs
@type disk: L{objects.Disk}
@param disk: the disk at which to start searching
@type l_ids: list
@param l_ids: list of current logical ids
- @type p_ids: list
- @param p_ids: list of current physical ids
@rtype: list
@return: a list of error messages
@@ -572,15 +570,10 @@ class ConfigWriter(object):
result.append("duplicate logical id %s" % str(disk.logical_id))
else:
l_ids.append(disk.logical_id)
- if disk.physical_id is not None:
-   if disk.physical_id in p_ids:
-     result.append("duplicate physical id %s" % str(disk.physical_id))
-   else:
-     p_ids.append(disk.physical_id)
if disk.children:
for child in disk.children:
- result.extend(self._CheckDiskIDs(child, l_ids, p_ids))
+ result.extend(self._CheckDiskIDs(child, l_ids))
return result
def _UnlockedVerifyConfig(self):
@@ -598,7 +591,6 @@ class ConfigWriter(object):
data = self._config_data
cluster = data.cluster
seen_lids = []
- seen_pids = []
# global cluster checks
if not cluster.enabled_hypervisors:
@@ -729,7 +721,7 @@ class ConfigWriter(object):
for idx, disk in enumerate(instance.disks):
result.extend(["instance '%s' disk %d error: %s" %
(instance.name, idx, msg) for msg in disk.Verify()])
- result.extend(self._CheckDiskIDs(disk, seen_lids, seen_pids))
+ result.extend(self._CheckDiskIDs(disk, seen_lids))
wrong_names = _CheckInstanceDiskIvNames(instance.disks)
if wrong_names:
@@ -873,57 +865,6 @@ class ConfigWriter(object):
"""
return self._UnlockedVerifyConfig()
- def _UnlockedSetDiskID(self, disk, node_uuid):
-   """Convert the unique ID to the ID needed on the target nodes.
-   This is used only for drbd, which needs ip/port configuration.
-   The routine descends down and updates its children also, because
-   this helps when the only the top device is passed to the remote
-   node.
-   This function is for internal use, when the config lock is already held.
-   """
-   if disk.children:
-     for child in disk.children:
-       self._UnlockedSetDiskID(child, node_uuid)
-   if disk.logical_id is None and disk.physical_id is not None:
-     return
-   if disk.dev_type == constants.DT_DRBD8:
-     pnode, snode, port, pminor, sminor, secret = disk.logical_id
-     if node_uuid not in (pnode, snode):
-       raise errors.ConfigurationError("DRBD device not knowing node %s" %
-                                       node_uuid)
-     pnode_info = self._UnlockedGetNodeInfo(pnode)
-     snode_info = self._UnlockedGetNodeInfo(snode)
-     if pnode_info is None or snode_info is None:
-       raise errors.ConfigurationError("Can't find primary or secondary node"
-                                       " for %s" % str(disk))
-     p_data = (pnode_info.secondary_ip, port)
-     s_data = (snode_info.secondary_ip, port)
-     if pnode == node_uuid:
-       disk.physical_id = p_data + s_data + (pminor, secret)
-     else: # it must be secondary, we tested above
-       disk.physical_id = s_data + p_data + (sminor, secret)
-   else:
-     disk.physical_id = disk.logical_id
-   return
- @locking.ssynchronized(_config_lock)
- def SetDiskID(self, disk, node_uuid):
-   """Convert the unique ID to the ID needed on the target nodes.
-   This is used only for drbd, which needs ip/port configuration.
-   The routine descends down and updates its children also, because
-   this helps when the only the top device is passed to the remote
-   node.
-   """
-   return self._UnlockedSetDiskID(disk, node_uuid)
@locking.ssynchronized(_config_lock)
def AddTcpUdpPort(self, port):
"""Adds a new port to the available port pool.
@@ -1586,7 +1527,6 @@
disk.logical_id = (disk.logical_id[0],
utils.PathJoin(file_storage_dir, inst.name,
"disk%s" % idx))
- disk.physical_id = disk.logical_id
# Force update of ssconf files
self._config_data.cluster.serial_no += 1
@@ -296,7 +296,7 @@ def _GetConfigFileDiskData(block_devices, blockdev_prefix,
mode = "r"
if cfdev.dev_type in [constants.DT_FILE, constants.DT_SHARED_FILE]:
- driver = _FILE_DRIVER_MAP[cfdev.physical_id[0]]
+ driver = _FILE_DRIVER_MAP[cfdev.logical_id[0]]
else:
driver = "phy"
@@ -1186,8 +1186,7 @@ class ExportInstanceHelper:
disk_id = tuple(result.payload)
disk_params = constants.DISK_LD_DEFAULTS[constants.DT_PLAIN].copy()
new_dev = objects.Disk(dev_type=constants.DT_PLAIN, size=disk.size,
- logical_id=disk_id, physical_id=disk_id,
- iv_name=disk.iv_name,
+ logical_id=disk_id, iv_name=disk.iv_name,
params=disk_params)
self._snap_disks.append(new_dev)
@@ -1237,7 +1236,7 @@ class ExportInstanceHelper:
continue
path = utils.PathJoin(pathutils.EXPORT_DIR, "%s.new" % instance.name,
- dev.physical_id[1])
+ dev.logical_id[1])
finished_fn = compat.partial(self._TransferFinished, idx)
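
With the field gone, the snapshot disk in the export helper above is built from the logical_id alone. A sketch of the post-change constructor call, following the hunk; the values are invented and the imports require a Ganeti checkout:

```python
# Hypothetical usage of the new objects.Disk constructor from the hunk above.
from ganeti import constants, objects

disk_params = constants.DISK_LD_DEFAULTS[constants.DT_PLAIN].copy()
disk_id = ("xenvg", ".disk0_data.snap")  # made-up (vg, lv) snapshot payload
new_dev = objects.Disk(dev_type=constants.DT_PLAIN, size=1024,  # size in MiB
                       logical_id=disk_id,  # physical_id= no longer exists
                       iv_name="disk/0", params=disk_params)
```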
@@ -506,7 +506,7 @@ class NIC(ConfigObject):
class Disk(ConfigObject):
"""Config object representing a block device."""
- __slots__ = (["name", "dev_type", "logical_id", "physical_id", "children", "iv_name",
+ __slots__ = (["name", "dev_type", "logical_id", "children", "iv_name",
"size", "mode", "params", "spindles"] + _UUID +
# dynamic_params is special. It depends on the node this instance
# is sent to, and should not be persisted.
@@ -768,8 +768,6 @@
obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
if obj.logical_id and isinstance(obj.logical_id, list):
obj.logical_id = tuple(obj.logical_id)
- if obj.physical_id and isinstance(obj.physical_id, list):
-   obj.physical_id = tuple(obj.physical_id)
if obj.dev_type in constants.LDS_DRBD:
# we need a tuple of length six here
if len(obj.logical_id) < 6:
@@ -785,22 +783,16 @@
elif self.dev_type in constants.LDS_DRBD:
node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
val = "<DRBD8("
- if self.physical_id is None:
-   phy = "unconfigured"
- else:
-   phy = ("configured as %s:%s %s:%s" %
-          (self.physical_id[0], self.physical_id[1],
-           self.physical_id[2], self.physical_id[3]))
- val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
-         (node_a, minor_a, node_b, minor_b, port, phy))
+ val += ("hosts=%s/%d-%s/%d, port=%s, " %
+         (node_a, minor_a, node_b, minor_b, port))
if self.children and self.children.count(None) == 0:
val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
else:
val += "no local storage"
else:
- val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
-        (self.dev_type, self.logical_id, self.physical_id, self.children))
+ val = ("<Disk(type=%s, logical_id=%s, children=%s" %
+        (self.dev_type, self.logical_id, self.children))
if self.iv_name is None:
val += ", not visible"
else:
@@ -379,7 +379,6 @@ decodeDLId obj lid = do
-- code currently can't build it.
data Disk = Disk
{ diskLogicalId :: DiskLogicalId
- -- , diskPhysicalId :: String
, diskChildren :: [Disk]
, diskIvName :: String
, diskSize :: Int
@@ -392,7 +391,6 @@
$(buildObjectSerialisation "Disk" $
[ customField 'decodeDLId 'encodeFullDLId ["dev_type"] $
simpleField "logical_id" [t| DiskLogicalId |]
- -- , simpleField "physical_id" [t| String |]
, defaultField [| [] |] $ simpleField "children" [t| [Disk] |]
, defaultField [| "" |] $ simpleField "iv_name" [t| String |]
, simpleField "size" [t| Int |]