Commit abdf0113 authored by Iustin Pop's avatar Iustin Pop

Complete removal of md/drbd 0.7 code

This patch removes the last of the md and drbd 0.7 code. Clusters which
have the old device types will be broken if this patch is applied.

Reviewed-by: imsnah
parent 5c54b832
This diff is collapsed.
......@@ -2872,7 +2872,7 @@ class LUFailoverInstance(LogicalUnit):
secondary_nodes = instance.secondary_nodes
if not secondary_nodes:
raise errors.ProgrammerError("no secondary node but using "
"DT_REMOTE_RAID1 template")
"a mirrored disk template")
target_node = secondary_nodes[0]
# check memory requirements on the secondary node
......@@ -2902,7 +2902,7 @@ class LUFailoverInstance(LogicalUnit):
feedback_fn("* checking disk consistency between source and target")
for dev in instance.disks:
# for remote_raid1, these are md over drbd
# for drbd, these are drbd over lvm
if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
if instance.status == "up" and not self.op.ignore_consistency:
raise errors.OpExecError("Disk %s is degraded on target node,"
......@@ -3751,13 +3751,6 @@ class LUReplaceDisks(LogicalUnit):
# replacement as for drbd7 (no different port allocated)
raise errors.OpPrereqError("Same secondary given, cannot execute"
" replacement")
# the user gave the current secondary, switch to
# 'no-replace-secondary' mode for drbd7
remote_node = None
if (instance.disk_template == constants.DT_REMOTE_RAID1 and
self.op.mode != constants.REPLACE_DISK_ALL):
raise errors.OpPrereqError("Template 'remote_raid1' only allows all"
" disks replacement, not individual ones")
if instance.disk_template == constants.DT_DRBD8:
if (self.op.mode == constants.REPLACE_DISK_ALL and
remote_node is not None):
......@@ -3789,101 +3782,6 @@ class LUReplaceDisks(LogicalUnit):
self.op.remote_node = remote_node
def _ExecRR1(self, feedback_fn):
"""Replace the disks of an instance.
instance = self.instance
iv_names = {}
# start of work
if self.op.remote_node is None:
remote_node = self.sec_node
remote_node = self.op.remote_node
cfg = self.cfg
for dev in instance.disks:
size = dev.size
lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
names = _GenerateUniqueNames(cfg, lv_names)
new_drbd = _GenerateMDDRBDBranch(cfg, instance.primary_node,
remote_node, size, names)
iv_names[dev.iv_name] = (dev, dev.children[0], new_drbd)
logger.Info("adding new mirror component on secondary for %s" %
if not _CreateBlockDevOnSecondary(cfg, remote_node, instance,
new_drbd, False,
raise errors.OpExecError("Failed to create new component on secondary"
" node %s. Full abort, cleanup manually!" %
logger.Info("adding new mirror component on primary")
if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
instance, new_drbd,
# remove secondary dev
cfg.SetDiskID(new_drbd, remote_node)
rpc.call_blockdev_remove(remote_node, new_drbd)
raise errors.OpExecError("Failed to create volume on primary!"
" Full abort, cleanup manually!!")
# the device exists now
# call the primary node to add the mirror to md
logger.Info("adding new mirror component to md")
if not rpc.call_blockdev_addchildren(instance.primary_node, dev,
logger.Error("Can't add mirror compoment to md!")
cfg.SetDiskID(new_drbd, remote_node)
if not rpc.call_blockdev_remove(remote_node, new_drbd):
logger.Error("Can't rollback on secondary")
cfg.SetDiskID(new_drbd, instance.primary_node)
if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
logger.Error("Can't rollback on primary")
raise errors.OpExecError("Full abort, cleanup manually!!")
# this can fail as the old devices are degraded and _WaitForSync
# does a combined result over all disks, so we don't check its
# return value
_WaitForSync(cfg, instance, self.proc, unlock=True)
# so check manually all the devices
for name in iv_names:
dev, child, new_drbd = iv_names[name]
cfg.SetDiskID(dev, instance.primary_node)
is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
if is_degr:
raise errors.OpExecError("MD device %s is degraded!" % name)
cfg.SetDiskID(new_drbd, instance.primary_node)
is_degr = rpc.call_blockdev_find(instance.primary_node, new_drbd)[5]
if is_degr:
raise errors.OpExecError("New drbd device %s is degraded!" % name)
for name in iv_names:
dev, child, new_drbd = iv_names[name]
logger.Info("remove mirror %s component" % name)
cfg.SetDiskID(dev, instance.primary_node)
if not rpc.call_blockdev_removechildren(instance.primary_node,
dev, [child]):
logger.Error("Can't remove child from mirror, aborting"
" *this device cleanup*.\nYou need to cleanup manually!!")
for node in child.logical_id[:2]:
logger.Info("remove child device on %s" % node)
cfg.SetDiskID(child, node)
if not rpc.call_blockdev_remove(node, child):
logger.Error("Warning: failed to remove device from node %s,"
" continuing operation." % node)
def _ExecD8DiskOnly(self, feedback_fn):
"""Replace a disk on the primary or secondary for dbrd8.
......@@ -4225,9 +4123,7 @@ class LUReplaceDisks(LogicalUnit):
instance = self.instance
if instance.disk_template == constants.DT_REMOTE_RAID1:
fn = self._ExecRR1
elif instance.disk_template == constants.DT_DRBD8:
if instance.disk_template == constants.DT_DRBD8:
if self.op.remote_node is None:
fn = self._ExecD8DiskOnly
......@@ -101,21 +101,17 @@ HKR_SUCCESS = 2
# disk template types
DT_DISKLESS = "diskless"
DT_PLAIN = "plain"
DT_LOCAL_RAID1 = "local_raid1"
DT_REMOTE_RAID1 = "remote_raid1"
DT_DRBD8 = "drbd"
DT_FILE = "file"
# the set of network-mirrored disk templates
DTS_NET_MIRROR = frozenset([DT_DRBD8])
# the set of non-lvm-based disk templates
# logical disk types
LD_LV = "lvm"
LD_MD_R1 = "md_raid1"
LD_DRBD7 = "drbd"
LD_DRBD8 = "drbd8"
LD_FILE = "file"
......@@ -124,7 +120,7 @@ FD_LOOP = "loop"
FD_BLKTAP = "blktap"
# the set of drbd-like disk types
LDS_DRBD = frozenset([LD_DRBD7, LD_DRBD8])
LDS_DRBD = frozenset([LD_DRBD8])
# disk replacement mode
REPLACE_DISK_PRI = "replace_primary"
......@@ -136,7 +132,6 @@ INSTANCE_CREATE = "create"
......@@ -342,12 +342,9 @@ class Disk(ConfigObject):
This method, given the node on which the parent disk lives, will
return the list of all (node, disk) pairs which describe the disk
tree in the most compact way. For example, a md/drbd/lvm stack
will be returned as (primary_node, md) and (secondary_node, drbd)
which represents all the top-level devices on the nodes. This
means that on the primary node we need to activate the the md (and
recursively all its children) and on the secondary node we need to
activate the drbd device (and its children, the two lvm volumes).
tree in the most compact way. For example, a drbd/lvm stack
will be returned as (primary_node, drbd) and (secondary_node, drbd)
which represents all the top-level devices on the nodes.
my_nodes = self.GetNodes(parent_node)
......@@ -142,8 +142,8 @@ import_opts = [
make_option("-p", "--cpu", dest="vcpus", help="Number of virtual CPUs",
default=1, type="int", metavar="<PROC>"),
make_option("-t", "--disk-template", dest="disk_template",
help="Custom disk setup (diskless, plain, local_raid1,"
" remote_raid1 or drbd)", default=None, metavar="TEMPL"),
help="Custom disk setup (diskless, file, plain, drbd)"
default=None, metavar="TEMPL"),
make_option("-i", "--ip", dest="ip",
help="IP address ('none' [default], 'auto', or specify address)",
default='none', type="string", metavar="<ADDRESS>"),
......@@ -300,8 +300,8 @@ commands = {
"[-f] <src> <dst>",
"Relocate the secondary instances from the first node"
" to the second one (only for instances of type remote_raid1"
" drbd)"),
" to the second one (only for instances with drbd disk template"
'failover': (FailoverNode, ARGS_ONE,
make_option("--ignore-consistency", dest="ignore_consistency",
......@@ -311,7 +311,7 @@ commands = {
"[-f] <node>",
"Stops the primary instances on a node and start them on their"
" secondary node (only for instances of type remote_raid1)"),
" secondary node (only for instances with drbd disk template)"),
'info': (ShowNodeConfig, ARGS_ANY, [DEBUG_OPT],
"[<node_name>...]", "Show information about the node(s)"),
'list': (ListNodes, ARGS_NONE,
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment