Commit 0c3d9c7c authored by Thomas Thrainer

Replace physical_id with dynamic_params



The disk field 'physical_id' has to be kept up to date whenever a disk
object is sent to a node via RPC. This is done manually with the
SetDiskID method, which is a source of bugs.

This patch replaces the use of 'physical_id' with a new field named
'dynamic_params'. The RPC code is adapted to update this field whenever
a disk object is sent to a node. Furthermore, this field is only ever
set on copies of disk objects which don't get written to the
configuration file. On the node side, the use of 'physical_id' is
removed and the new dynamic parameters are now used for the same
purpose.
Signed-off-by: Thomas Thrainer <thomasth@google.com>
Reviewed-by: Jose A. Lopes <jabolopes@google.com>
parent 5bd52dab
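
The gist of the new mechanism, as a minimal sketch: before a disk object is
serialized for an RPC call, the RPC layer annotates a copy with node-specific
dynamic parameters, so 'dynamic_params' never reaches the configuration file.
The helper name below is illustrative, not the commit's actual RPC code;
Copy, UpdateDynamicDiskParams and ToDict are the objects.Disk methods used
in this patch.

  def _AnnotateDiskForRpc(disk, target_node_uuid, nodes_ip):
    # Annotate a copy only: the original, persisted object stays clean.
    disk_copy = disk.Copy()
    # Fills disk_copy.dynamic_params (for DRBD: peer IPs and minors).
    disk_copy.UpdateDynamicDiskParams(target_node_uuid, nodes_ip)
    return disk_copy.ToDict()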
@@ -3634,8 +3634,6 @@ def _GetImportExportIoCommand(instance, mode, ieio, ieargs):
assert isinstance(disk_index, (int, long))
real_disk = _OpenRealBD(disk)
inst_os = OSFromDisk(instance.os)
env = OSEnvironment(instance, inst_os)
@@ -3645,6 +3643,7 @@ def _GetImportExportIoCommand(instance, mode, ieio, ieargs):
script = inst_os.import_script
elif mode == constants.IEM_EXPORT:
real_disk = _OpenRealBD(disk)
env["EXPORT_DEVICE"] = real_disk.dev_path
env["EXPORT_INDEX"] = str(disk_index)
script = inst_os.export_script
@@ -3872,35 +3871,31 @@ def CleanupImportExport(name):
shutil.rmtree(status_dir, ignore_errors=True)
def _SetPhysicalId(target_node_uuid, nodes_ip, disks):
"""Sets the correct physical ID on all passed disks.
"""
for cf in disks:
cf.SetPhysicalID(target_node_uuid, nodes_ip)
def _FindDisks(disks):
"""Finds attached L{BlockDev}s for the given disks.
@type disks: list of L{objects.Disk}
@param disks: the disk objects we need to find
def _FindDisks(target_node_uuid, nodes_ip, disks):
"""Sets the physical ID on disks and returns the block devices.
@return: list of L{BlockDev} objects or C{None} if a given disk
was not found or was not attached.
"""
_SetPhysicalId(target_node_uuid, nodes_ip, disks)
bdevs = []
for cf in disks:
rd = _RecursiveFindBD(cf)
for disk in disks:
rd = _RecursiveFindBD(disk)
if rd is None:
_Fail("Can't find device %s", cf)
_Fail("Can't find device %s", disk)
bdevs.append(rd)
return bdevs
def DrbdDisconnectNet(target_node_uuid, nodes_ip, disks):
def DrbdDisconnectNet(disks):
"""Disconnects the network on a list of drbd devices.
"""
bdevs = _FindDisks(target_node_uuid, nodes_ip, disks)
bdevs = _FindDisks(disks)
# disconnect disks
for rd in bdevs:
@@ -3911,12 +3906,11 @@ def DrbdDisconnectNet(target_node_uuid, nodes_ip, disks):
err, exc=True)
def DrbdAttachNet(target_node_uuid, nodes_ip, disks, instance_name,
multimaster):
def DrbdAttachNet(disks, instance_name, multimaster):
"""Attaches the network on a list of drbd devices.
"""
bdevs = _FindDisks(target_node_uuid, nodes_ip, disks)
bdevs = _FindDisks(disks)
if multimaster:
for idx, rd in enumerate(bdevs):
@@ -3974,7 +3968,7 @@ def DrbdAttachNet(target_node_uuid, nodes_ip, disks, instance_name,
_Fail("Can't change to primary mode: %s", err)
def DrbdWaitSync(target_node_uuid, nodes_ip, disks):
def DrbdWaitSync(disks):
"""Wait until DRBDs have synchronized.
"""
@@ -3984,7 +3978,7 @@ def DrbdWaitSync(target_node_uuid, nodes_ip, disks):
raise utils.RetryAgain()
return stats
bdevs = _FindDisks(target_node_uuid, nodes_ip, disks)
bdevs = _FindDisks(disks)
min_resync = 100
alldone = True
@@ -4004,11 +3998,10 @@ def DrbdWaitSync(target_node_uuid, nodes_ip, disks):
return (alldone, min_resync)
def DrbdNeedsActivation(target_node_uuid, nodes_ip, disks):
def DrbdNeedsActivation(disks):
"""Checks which of the passed disks needs activation and returns their UUIDs.
"""
_SetPhysicalId(target_node_uuid, nodes_ip, disks)
faulty_disks = []
for disk in disks:
......
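
With the dynamic parameters travelling inside the disk objects, the node-side
DRBD entry points above lose their target_node_uuid and nodes_ip arguments. A
sketch of the resulting call shape on the node (assuming the disks arrive
already annotated; DisconnectNet is the underlying DRBD block-device method):

  # Old: DrbdDisconnectNet(target_node_uuid, nodes_ip, disks)
  # New: the annotated disks carry everything the node needs.
  bdevs = _FindDisks(disks)
  for rd in bdevs:
    rd.DisconnectNet()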
@@ -533,8 +533,12 @@ class LUClusterRepairDiskSizes(NoHooksLU):
changed = []
for node_uuid, dskl in per_node_disks.items():
newl = [v[2].Copy() for v in dskl]
for dsk in newl:
if not dskl:
# no disks on the node
continue
newl = [(v[2].Copy(), v[0]) for v in dskl]
for (dsk, _) in newl:
self.cfg.SetDiskID(dsk, node_uuid)
node_name = self.cfg.GetNodeName(node_uuid)
result = self.rpc.call_blockdev_getdimensions(node_uuid, newl)
@@ -2667,7 +2671,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
"""
node_disks = {}
node_disks_devonly = {}
node_disks_dev_inst_only = {}
diskless_instances = set()
diskless = constants.DT_DISKLESS
@@ -2687,20 +2691,20 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
node_disks[nuuid] = disks
# _AnnotateDiskParams makes already copies of the disks
devonly = []
dev_inst_only = []
for (inst_uuid, dev) in disks:
(anno_disk,) = AnnotateDiskParams(instanceinfo[inst_uuid], [dev],
self.cfg)
self.cfg.SetDiskID(anno_disk, nuuid)
devonly.append(anno_disk)
dev_inst_only.append((anno_disk, instanceinfo[inst_uuid]))
node_disks_devonly[nuuid] = devonly
node_disks_dev_inst_only[nuuid] = dev_inst_only
assert len(node_disks) == len(node_disks_devonly)
assert len(node_disks) == len(node_disks_dev_inst_only)
# Collect data from all nodes with disks
result = self.rpc.call_blockdev_getmirrorstatus_multi(node_disks.keys(),
node_disks_devonly)
result = self.rpc.call_blockdev_getmirrorstatus_multi(
node_disks.keys(), node_disks_dev_inst_only)
assert len(result) == len(node_disks)
......
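
A pattern that recurs throughout the master-side changes: RPC calls that used
to take bare disk objects now take (disk, instance) or (disks, instance)
tuples, which gives the RPC layer the context it needs to annotate the disk
copies. Two shapes taken from this patch:

  # Single disk plus owning instance:
  result = self.rpc.call_blockdev_find(node_uuid, (dev, instance))
  # All of an instance's disks at once:
  result = self.rpc.call_drbd_wait_sync(node_uuids,
                                        (instance.disks, instance))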
@@ -720,8 +720,7 @@ def AnnotateDiskParams(instance, devs, cfg):
@see L{rpc.AnnotateDiskParams}
"""
return rpc.AnnotateDiskParams(instance.disk_template, devs,
cfg.GetInstanceDiskParams(instance))
return rpc.AnnotateDiskParams(devs, cfg.GetInstanceDiskParams(instance))
def SupportsOob(cfg, node):
......
@@ -968,12 +968,9 @@ class LUGroupVerifyDisks(NoHooksLU):
inst.secondary_nodes):
node_to_inst.setdefault(node_uuid, []).append(inst)
nodes_ip = dict((uuid, node.secondary_ip) for (uuid, node)
in self.cfg.GetMultiNodeInfo(node_to_inst.keys()))
for (node_uuid, insts) in node_to_inst.items():
node_disks = [(inst.disks, inst) for inst in insts]
node_res = self.rpc.call_drbd_needs_activation(node_uuid, nodes_ip,
node_disks)
node_res = self.rpc.call_drbd_needs_activation(node_uuid, node_disks)
msg = node_res.fail_msg
if msg:
logging.warning("Error getting DRBD status on node %s: %s",
......
@@ -1389,7 +1389,7 @@ class LUInstanceCreate(LogicalUnit):
dt = masterd.instance.DiskTransfer("disk/%s" % idx,
constants.IEIO_FILE, (image, ),
constants.IEIO_SCRIPT,
(iobj.disks[idx], idx),
((iobj.disks[idx], iobj), idx),
None)
transfers.append(dt)
@@ -1574,7 +1574,8 @@ class LUInstanceRename(LogicalUnit):
for (idx, disk) in enumerate(renamed_inst.disks):
for node_uuid in renamed_inst.all_nodes:
self.cfg.SetDiskID(disk, node_uuid)
result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
result = self.rpc.call_blockdev_setinfo(node_uuid,
(disk, renamed_inst), info)
result.Warn("Error setting info on node %s for disk %s" %
(self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
try:
@@ -3098,8 +3099,7 @@ class LUInstanceSetParams(LogicalUnit):
self.instance.uuid, pnode_uuid,
[snode_uuid], disk_info, None, None, 0,
feedback_fn, self.diskparams)
anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
self.diskparams)
anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
info = GetInstanceInfoText(self.instance)
@@ -3195,7 +3195,7 @@ class LUInstanceSetParams(LogicalUnit):
feedback_fn("Removing volumes on the secondary node...")
for disk in old_disks:
self.cfg.SetDiskID(disk, snode_uuid)
result = self.rpc.call_blockdev_remove(snode_uuid, disk)
result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
result.Warn("Could not remove block device %s on node %s,"
" continuing anyway" %
(disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
@@ -3205,7 +3205,7 @@ class LUInstanceSetParams(LogicalUnit):
for idx, disk in enumerate(old_disks):
meta = disk.children[1]
self.cfg.SetDiskID(meta, pnode_uuid)
result = self.rpc.call_blockdev_remove(pnode_uuid, meta)
result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
result.Warn("Could not remove metadata for disk %d on node %s,"
" continuing anyway" %
(idx, self.cfg.GetNodeName(pnode_uuid)),
@@ -3265,7 +3265,8 @@ class LUInstanceSetParams(LogicalUnit):
for node_uuid, disk in anno_disk.ComputeNodeTree(
self.instance.primary_node):
self.cfg.SetDiskID(disk, node_uuid)
msg = self.rpc.call_blockdev_remove(node_uuid, disk).fail_msg
msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
.fail_msg
if msg:
self.LogWarning("Could not remove disk/%d on node '%s': %s,"
" continuing anyway", idx,
......
@@ -480,7 +480,6 @@ class TLMigrateInstance(Tasklet):
while not all_done:
all_done = True
result = self.rpc.call_drbd_wait_sync(self.all_node_uuids,
self.nodes_ip,
(self.instance.disks,
self.instance))
min_percent = 100
@@ -507,7 +506,7 @@ class TLMigrateInstance(Tasklet):
self.cfg.SetDiskID(dev, node_uuid)
result = self.rpc.call_blockdev_close(node_uuid, self.instance.name,
self.instance.disks)
(self.instance.disks, self.instance))
result.Raise("Cannot change disk to secondary on node %s" %
self.cfg.GetNodeName(node_uuid))
@@ -516,9 +515,8 @@ class TLMigrateInstance(Tasklet):
"""
self.feedback_fn("* changing into standalone mode")
result = self.rpc.call_drbd_disconnect_net(self.all_node_uuids,
self.nodes_ip,
self.instance.disks)
result = self.rpc.call_drbd_disconnect_net(
self.all_node_uuids, (self.instance.disks, self.instance))
for node_uuid, nres in result.items():
nres.Raise("Cannot disconnect disks node %s" %
self.cfg.GetNodeName(node_uuid))
@@ -532,7 +530,7 @@ class TLMigrateInstance(Tasklet):
else:
msg = "single-master"
self.feedback_fn("* changing disks into %s mode" % msg)
result = self.rpc.call_drbd_attach_net(self.all_node_uuids, self.nodes_ip,
result = self.rpc.call_drbd_attach_net(self.all_node_uuids,
(self.instance.disks, self.instance),
self.instance.name, multimaster)
for node_uuid, nres in result.items():
......
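
Condensed, the migration tasklet's DRBD reconfiguration now reads roughly as
follows (a sketch based on the calls above; feedback messages and the real
per-node error texts are elided):

  # standalone -> connected, optionally in dual-primary (multimaster) mode
  result = self.rpc.call_drbd_attach_net(self.all_node_uuids,
                                         (self.instance.disks, self.instance),
                                         self.instance.name, multimaster)
  for node_uuid, nres in result.items():
    nres.Raise("Cannot attach network on node %s" % node_uuid)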
@@ -309,8 +309,7 @@ class LUInstanceQueryData(NoHooksLU):
return None
self.cfg.SetDiskID(dev, node_uuid)
result = self.rpc.call_blockdev_find(node_uuid, dev)
result = self.rpc.call_blockdev_find(node_uuid, (dev, instance))
if result.offline:
return None
......
@@ -80,9 +80,9 @@ def CreateSingleBlockDev(lu, node_uuid, instance, device, info, force_open,
"""
lu.cfg.SetDiskID(device, node_uuid)
result = lu.rpc.call_blockdev_create(node_uuid, device, device.size,
instance.name, force_open, info,
excl_stor)
result = lu.rpc.call_blockdev_create(node_uuid, (device, instance),
device.size, instance.name, force_open,
info, excl_stor)
result.Raise("Can't create block device %s on"
" node %s for instance %s" % (device,
lu.cfg.GetNodeName(node_uuid),
@@ -184,7 +184,7 @@ def _CreateBlockDev(lu, node_uuid, instance, device, force_create, info,
force_open, excl_stor)
def _UndoCreateDisks(lu, disks_created):
def _UndoCreateDisks(lu, disks_created, instance):
"""Undo the work performed by L{CreateDisks}.
This function is called in case of an error to undo the work of
@@ -193,11 +193,13 @@ def _UndoCreateDisks(lu, disks_created):
@type lu: L{LogicalUnit}
@param lu: the logical unit on whose behalf we execute
@param disks_created: the result returned by L{CreateDisks}
@type instance: L{objects.Instance}
@param instance: the instance for which disks were created
"""
for (node_uuid, disk) in disks_created:
lu.cfg.SetDiskID(disk, node_uuid)
result = lu.rpc.call_blockdev_remove(node_uuid, disk)
result = lu.rpc.call_blockdev_remove(node_uuid, (disk, instance))
result.Warn("Failed to remove newly-created disk %s on node %s" %
(disk, lu.cfg.GetNodeName(node_uuid)), logging.warning)
@@ -259,7 +261,7 @@ def CreateDisks(lu, instance, to_skip=None, target_node_uuid=None, disks=None):
logging.warning("Creating disk %s for instance '%s' failed",
idx, instance.name)
disks_created.extend(e.created_devices)
_UndoCreateDisks(lu, disks_created)
_UndoCreateDisks(lu, disks_created, instance)
raise errors.OpExecError(e.message)
return disks_created
@@ -1110,7 +1112,7 @@ def WipeOrCleanupDisks(lu, instance, disks=None, cleanup=None):
except errors.OpExecError:
logging.warning("Wiping disks for instance '%s' failed",
instance.name)
_UndoCreateDisks(lu, cleanup)
_UndoCreateDisks(lu, cleanup, instance)
raise
@@ -1490,8 +1492,8 @@ class LUInstanceGrowDisk(LogicalUnit):
if wipe_disks:
# Get disk size from primary node for wiping
self.cfg.SetDiskID(self.disk, self.instance.primary_node)
result = self.rpc.call_blockdev_getdimensions(self.instance.primary_node,
[self.disk])
result = self.rpc.call_blockdev_getdimensions(
self.instance.primary_node, ([self.disk], self.instance))
result.Raise("Failed to retrieve disk size from node '%s'" %
self.instance.primary_node)
@@ -1801,7 +1803,7 @@ def _CheckDiskConsistencyInner(lu, instance, dev, node_uuid, on_primary,
result = True
if on_primary or dev.AssembleOnSecondary():
rstats = lu.rpc.call_blockdev_find(node_uuid, dev)
rstats = lu.rpc.call_blockdev_find(node_uuid, (dev, instance))
msg = rstats.fail_msg
if msg:
lu.LogWarning("Can't find disk on node %s: %s",
@@ -1844,7 +1846,7 @@ def _BlockdevFind(lu, node_uuid, dev, instance):
"""
(disk,) = AnnotateDiskParams(instance, [dev], lu.cfg)
return lu.rpc.call_blockdev_find(node_uuid, disk)
return lu.rpc.call_blockdev_find(node_uuid, (disk, instance))
def _GenerateUniqueNames(lu, exts):
@@ -2298,8 +2300,8 @@ class TLReplaceDisks(Tasklet):
for lv in old_lvs:
self.cfg.SetDiskID(lv, node_uuid)
msg = self.rpc.call_blockdev_remove(node_uuid, lv).fail_msg
msg = self.rpc.call_blockdev_remove(node_uuid, (lv, self.instance)) \
.fail_msg
if msg:
self.lu.LogWarning("Can't remove old LV: %s", msg,
hint="remove unused LVs manually")
@@ -2348,8 +2350,9 @@ class TLReplaceDisks(Tasklet):
for dev, old_lvs, new_lvs in iv_names.itervalues():
self.lu.LogInfo("Detaching %s drbd from local storage", dev.iv_name)
result = self.rpc.call_blockdev_removechildren(self.target_node_uuid, dev,
old_lvs)
result = self.rpc.call_blockdev_removechildren(self.target_node_uuid,
(dev, self.instance),
(old_lvs, self.instance))
result.Raise("Can't detach drbd from local storage on node"
" %s for device %s" %
(self.cfg.GetNodeName(self.target_node_uuid), dev.iv_name))
@@ -2370,7 +2373,8 @@ class TLReplaceDisks(Tasklet):
# Build the rename list based on what LVs exist on the node
rename_old_to_new = []
for to_ren in old_lvs:
result = self.rpc.call_blockdev_find(self.target_node_uuid, to_ren)
result = self.rpc.call_blockdev_find(self.target_node_uuid,
(to_ren, self.instance))
if not result.fail_msg and result.payload:
# device exists
rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
@@ -2393,25 +2397,24 @@ class TLReplaceDisks(Tasklet):
# Intermediate steps of in memory modifications
for old, new in zip(old_lvs, new_lvs):
new.logical_id = old.logical_id
self.cfg.SetDiskID(new, self.target_node_uuid)
# We need to modify old_lvs so that removal later removes the
# right LVs, not the newly added ones; note that old_lvs is a
# copy here
for disk in old_lvs:
disk.logical_id = ren_fn(disk, temp_suffix)
self.cfg.SetDiskID(disk, self.target_node_uuid)
# Now that the new lvs have the old name, we can add them to the device
self.lu.LogInfo("Adding new mirror component on %s",
self.cfg.GetNodeName(self.target_node_uuid))
result = self.rpc.call_blockdev_addchildren(self.target_node_uuid,
(dev, self.instance), new_lvs)
(dev, self.instance),
(new_lvs, self.instance))
msg = result.fail_msg
if msg:
for new_lv in new_lvs:
msg2 = self.rpc.call_blockdev_remove(self.target_node_uuid,
new_lv).fail_msg
(new_lv, self.instance)).fail_msg
if msg2:
self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
hint=("cleanup manually the unused logical"
@@ -2560,8 +2563,8 @@ class TLReplaceDisks(Tasklet):
" soon as possible"))
self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
result = self.rpc.call_drbd_disconnect_net([pnode], self.node_secondary_ip,
self.instance.disks)[pnode]
result = self.rpc.call_drbd_disconnect_net(
[pnode], (self.instance.disks, self.instance))[pnode]
msg = result.fail_msg
if msg:
@@ -2587,7 +2590,6 @@ class TLReplaceDisks(Tasklet):
" (standalone => connected)")
result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
self.new_node_uuid],
self.node_secondary_ip,
(self.instance.disks, self.instance),
self.instance.name,
False)
......
@@ -275,7 +275,7 @@ def RemoveDisks(lu, instance, target_node_uuid=None, ignore_failures=False):
edata = device.ComputeNodeTree(instance.primary_node)
for node_uuid, disk in edata:
lu.cfg.SetDiskID(disk, node_uuid)
result = lu.rpc.call_blockdev_remove(node_uuid, disk)
result = lu.rpc.call_blockdev_remove(node_uuid, (disk, instance))
if result.fail_msg:
lu.LogWarning("Could not remove disk %s on node %s,"
" continuing anyway: %s", idx,
......
@@ -1207,6 +1207,13 @@ DISK_DT_TYPES = {
DISK_DT_PARAMETERS = frozenset(DISK_DT_TYPES.keys())
# dynamic disk parameters
DDP_LOCAL_IP = "local-ip"
DDP_REMOTE_IP = "remote-ip"
DDP_PORT = "port"
DDP_LOCAL_MINOR = "local-minor"
DDP_REMOTE_MINOR = "remote-minor"
# OOB supported commands
OOB_POWER_ON = _constants.OOB_POWER_ON
OOB_POWER_OFF = _constants.OOB_POWER_OFF
......
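
The new DDP_* constants are the keys of the per-node dynamic parameter
dictionary. For a DRBD disk being sent to its primary node the dictionary
would look roughly like this (addresses and minor numbers invented for
illustration):

  dyn_disk_params = {
      constants.DDP_LOCAL_IP: "192.0.2.10",   # IP of the node receiving the disk
      constants.DDP_REMOTE_IP: "192.0.2.11",  # IP of its DRBD peer
      constants.DDP_LOCAL_MINOR: 0,           # DRBD minor on the receiving node
      constants.DDP_REMOTE_MINOR: 1,          # DRBD minor on the peer
  }

DDP_PORT is defined as well, but the UpdateDynamicDiskParams implementation
later in this patch does not populate it; the DRBD port stays in the disk's
logical ID.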
@@ -1209,7 +1209,8 @@ class ExportInstanceHelper:
self._feedback_fn("Removing snapshot of disk/%s on node %s" %
(disk_index, src_node))
result = self._lu.rpc.call_blockdev_remove(src_node, disk)
result = self._lu.rpc.call_blockdev_remove(src_node,
(disk, self._instance))
if result.fail_msg:
self._lu.LogWarning("Could not remove snapshot for disk/%d from node"
" %s: %s", disk_index, src_node, result.fail_msg)
@@ -1242,7 +1243,7 @@ class ExportInstanceHelper:
# FIXME: pass debug option from opcode to backend
dt = DiskTransfer("snapshot/%s" % idx,
constants.IEIO_SCRIPT, (dev, idx),
constants.IEIO_SCRIPT, ((dev, instance), idx),
constants.IEIO_FILE, (path, ),
finished_fn)
transfers.append(dt)
@@ -1300,7 +1301,7 @@ class ExportInstanceHelper:
finished_fn = compat.partial(self._TransferFinished, idx)
ieloop.Add(DiskExport(self._lu, instance.primary_node,
opts, host, port, instance, "disk%d" % idx,
constants.IEIO_SCRIPT, (dev, idx),
constants.IEIO_SCRIPT, ((dev, instance), idx),
timeouts, cbs, private=(idx, finished_fn)))
ieloop.Run()
@@ -1482,7 +1483,7 @@ def RemoteImport(lu, feedback_fn, instance, pnode, source_x509_ca,
ieloop.Add(DiskImport(lu, instance.primary_node, opts, instance,
"disk%d" % idx,
constants.IEIO_SCRIPT, (dev, idx),
constants.IEIO_SCRIPT, ((dev, instance), idx),
timeouts, cbs, private=(idx, )))
ieloop.Run()
......
@@ -506,9 +506,11 @@ class NIC(ConfigObject):
class Disk(ConfigObject):
"""Config object representing a block device."""
__slots__ = (["name", "dev_type", "logical_id", "physical_id",
"children", "iv_name", "size", "mode", "params", "spindles"] +
_UUID)
__slots__ = (["name", "dev_type", "logical_id", "physical_id", "children", "iv_name",
"size", "mode", "params", "spindles"] + _UUID +
# dynamic_params is special. It depends on the node this instance
# is sent to, and should not be persisted.
["dynamic_params"])
def CreateOnSecondary(self):
"""Test if this device needs to be created on a secondary node."""
@@ -696,49 +698,50 @@ class Disk(ConfigObject):
child.UnsetSize()
self.size = 0
def SetPhysicalID(self, target_node_uuid, nodes_ip):
"""Convert the logical ID to the physical ID.
def UpdateDynamicDiskParams(self, target_node_uuid, nodes_ip):
"""Updates the dynamic disk params for the given node.
This is used only for drbd, which needs ip/port configuration.
The routine descends down and updates its children also, because
this helps when only the top device is passed to the remote
node.
This is mainly used for drbd, which needs ip/port configuration.
Arguments:
- target_node_uuid: the node UUID we wish to configure for
- nodes_ip: a mapping of node name to ip
The target_node must exist in nodes_ip, and must be one of the
nodes in the logical ID for each of the DRBD devices encountered
in the disk tree.
The target_node must exist in nodes_ip, and should be one of the
nodes in the logical ID if this device is a DRBD device.
"""
if self.children:
for child in self.children:
child.SetPhysicalID(target_node_uuid, nodes_ip)
child.UpdateDynamicDiskParams(target_node_uuid, nodes_ip)
if self.logical_id is None and self.physical_id is not None:
return
if self.dev_type in constants.LDS_DRBD:
pnode_uuid, snode_uuid, port, pminor, sminor, secret = self.logical_id
dyn_disk_params = {}
if self.logical_id is not None and self.dev_type in constants.LDS_DRBD:
pnode_uuid, snode_uuid, _, pminor, sminor, _ = self.logical_id
if target_node_uuid not in (pnode_uuid, snode_uuid):
raise errors.ConfigurationError("DRBD device not knowing node %s" %
target_node_uuid)
# disk object is being sent to neither the primary nor the secondary
# node. reset the dynamic parameters, the target node is not
# supposed to use them.
self.dynamic_params = dyn_disk_params
return
pnode_ip = nodes_ip.get(pnode_uuid, None)
snode_ip = nodes_ip.get(snode_uuid, None)
if pnode_ip is None or snode_ip is None:
raise errors.ConfigurationError("Can't find primary or secondary node"
" for %s" % str(self))
p_data = (pnode_ip, port)
s_data = (snode_ip, port)
if pnode_uuid == target_node_uuid:
self.physical_id = p_data + s_data + (pminor, secret)
dyn_disk_params[constants.DDP_LOCAL_IP] = pnode_ip
dyn_disk_params[constants.DDP_REMOTE_IP] = snode_ip
dyn_disk_params[constants.DDP_LOCAL_MINOR] = pminor
dyn_disk_params[constants.DDP_REMOTE_MINOR] = sminor
else: # it must be secondary, we tested above
self.physical_id = s_data + p_data + (sminor, secret)
else:
self.physical_id = self.logical_id
return
dyn_disk_params[constants.DDP_LOCAL_IP] = snode_ip
dyn_disk_params[constants.DDP_REMOTE_IP] = pnode_ip
dyn_disk_params[constants.DDP_LOCAL_MINOR] = sminor
dyn_disk_params[constants.DDP_REMOTE_MINOR] = pminor
self.dynamic_params = dyn_disk_params
def ToDict(self):
"""Disk-specific conversion to standard python types.
......
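
A usage sketch for the new method (UUIDs and IPs are placeholders; note the
reset behaviour when the target node is neither of the DRBD peers):

  nodes_ip = {pnode_uuid: "192.0.2.10", snode_uuid: "192.0.2.11"}

  disk.UpdateDynamicDiskParams(pnode_uuid, nodes_ip)
  disk.dynamic_params[constants.DDP_LOCAL_IP]      # "192.0.2.10"
  disk.dynamic_params[constants.DDP_REMOTE_MINOR]  # the secondary's minor

  disk.UpdateDynamicDiskParams(other_node_uuid, nodes_ip)
  disk.dynamic_params  # {} - reset, since that node must not use them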
@@ -126,7 +126,7 @@ def RunWithRPC(fn):
return wrapper