Commit f9193417 authored by Iustin Pop's avatar Iustin Pop
Browse files

Remove the option to create md/drbd7 instances

This patch removes the options that allow creating local_raid1 or
remote_raid1 instances. It also updates the documentation and removes
these disk templates from burnin and from qa.

Reviewed-by: imsnah
parent 249069a1
......@@ -2713,43 +2713,6 @@ def _GenerateDiskTemplate(cfg, template_name,
logical_id=(vgname, names[1]),
iv_name = "sdb")
disks = [sda_dev, sdb_dev]
elif template_name == constants.DT_LOCAL_RAID1:
if len(secondary_nodes) != 0:
raise errors.ProgrammerError("Wrong template configuration")
names = _GenerateUniqueNames(cfg, [".sda_m1", ".sda_m2",
".sdb_m1", ".sdb_m2"])
sda_dev_m1 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
logical_id=(vgname, names[0]))
sda_dev_m2 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
logical_id=(vgname, names[1]))
md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name = "sda",
size=disk_sz,
children = [sda_dev_m1, sda_dev_m2])
sdb_dev_m1 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
logical_id=(vgname, names[2]))
sdb_dev_m2 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
logical_id=(vgname, names[3]))
md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name = "sdb",
size=swap_sz,
children = [sdb_dev_m1, sdb_dev_m2])
disks = [md_sda_dev, md_sdb_dev]
elif template_name == constants.DT_REMOTE_RAID1:
if len(secondary_nodes) != 1:
raise errors.ProgrammerError("Wrong template configuration")
remote_node = secondary_nodes[0]
names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
".sdb_data", ".sdb_meta"])
drbd_sda_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
disk_sz, names[0:2])
md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sda",
children = [drbd_sda_dev], size=disk_sz)
drbd_sdb_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
swap_sz, names[2:4])
md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sdb",
children = [drbd_sdb_dev], size=swap_sz)
disks = [md_sda_dev, md_sdb_dev]
elif template_name == constants.DT_DRBD8:
if len(secondary_nodes) != 1:
raise errors.ProgrammerError("Wrong template configuration")
......@@ -2960,9 +2923,7 @@ class LUCreateInstance(LogicalUnit):
req_size_dict = {
constants.DT_DISKLESS: None,
constants.DT_PLAIN: self.op.disk_size + self.op.swap_size,
constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2,
# 256 MB are added for drbd metadata, 128MB for each drbd device
constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256,
constants.DT_DRBD8: self.op.disk_size + self.op.swap_size + 256,
}
......
......@@ -85,8 +85,6 @@
<arg choice="req">-t<group>
<arg>diskless</arg>
<arg>plain</arg>
<arg>local_raid1</arg>
<arg>remote_raid1</arg>
<arg>drbd</arg>
</group>
</arg>
......@@ -236,36 +234,12 @@
<para>Disk devices will be logical volumes.</para>
</listitem>
</varlistentry>
<varlistentry>
<term>local_raid1</term>
<listitem>
<para>
Disk devices will be md raid1 arrays over two local
logical volumes.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>remote_raid1</term>
<listitem>
<para>
Disk devices will be md raid1 arrays with one
component (so it's not actually raid1): a drbd
(0.7.x) device between the instance's primary node
and the node given by the second value of the
<option>--node</option> option.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term>drbd</term>
<listitem>
<para>
Disk devices will be drbd (version 8.x) on top of
lvm volumes. They are equivalent in functionality to
<replaceable>remote_raid1</replaceable>, but are
recommended for new instances (if you have drbd 8.x
installed).
lvm volumes.
</para>
</listitem>
</varlistentry>
......@@ -288,7 +262,7 @@
<screen>
# gnt-instance add -t plain -s 30g -m 512 -o debian-etch \
-n node1.example.com instance1.example.com
# gnt-instance add -t remote_raid1 -s 30g -m 512 -o debian-etch \
# gnt-instance add -t drbd -s 30g -m 512 -o debian-etch \
-n node1.example.com:node2.example.com instance2.example.com
</screen>
</para>
......
......@@ -268,16 +268,7 @@ def main():
RunTest(qa_instance.TestInstanceRemove, instance)
del instance
if qa_config.TestEnabled('instance-add-local-mirror-disk'):
instance = RunTest(qa_instance.TestInstanceAddWithLocalMirrorDisk, pnode)
RunCommonInstanceTests(instance)
RunExportImportTests(instance, pnode)
RunTest(qa_instance.TestInstanceRemove, instance)
del instance
multinode_tests = [
('instance-add-remote-raid-disk',
qa_instance.TestInstanceAddWithRemoteRaidDisk),
('instance-add-drbd-disk',
qa_instance.TestInstanceAddWithDrbdDisk),
]
......
......@@ -49,13 +49,9 @@ tests:
node-failover: False
instance-add-plain-disk: True
instance-add-local-mirror-disk: True
# Requires DRBD 0.7.x
instance-add-remote-raid-disk: True
# Requires DRBD 8.x
instance-add-drbd-disk: False
instance-add-drbd-disk: True
instance-list: True
instance-failover: True
......@@ -76,7 +72,7 @@ tests:
# Other settings
options:
burnin-instances: 2
burnin-disk-template: remote_raid1
burnin-disk-template: drbd
# Directory containing QA hooks
#hooks-dir: hooks/
......
......@@ -156,7 +156,7 @@ def TestClusterBurnin():
master = qa_config.GetMasterNode()
disk_template = (qa_config.get('options', {}).
get('burnin-disk-template', 'remote_raid1'))
get('burnin-disk-template', 'drbd'))
# Get as many instances as we need
instances = []
......
......@@ -54,11 +54,6 @@ def Validate():
if len(cfg['instances']) < 1:
raise qa_error.Error("Need at least one instance")
if (TestEnabled('instance-add-remote-raid-disk') and
TestEnabled('instance-add-drbd-disk')):
raise qa_error.Error('Tests for disk templates remote_raid1 and drbd'
' cannot be enabled at the same time.')
def get(name, default=None):
return cfg.get(name, default)
......
......@@ -72,19 +72,6 @@ def TestInstanceAddWithPlainDisk(node):
return _DiskTest(node['primary'], 'plain')
@qa_utils.DefineHook('instance-add-local-mirror-disk')
def TestInstanceAddWithLocalMirrorDisk(node):
"""gnt-instance add -t local_raid1"""
return _DiskTest(node['primary'], 'local_raid1')
@qa_utils.DefineHook('instance-add-remote-raid-disk')
def TestInstanceAddWithRemoteRaidDisk(node, node2):
"""gnt-instance add -t remote_raid1"""
return _DiskTest("%s:%s" % (node['primary'], node2['primary']),
'remote_raid1')
@qa_utils.DefineHook('instance-add-drbd-disk')
def TestInstanceAddWithDrbdDisk(node, node2):
"""gnt-instance add -t drbd"""
......
......@@ -726,8 +726,8 @@ add_opts = [
make_option("-p", "--cpu", dest="vcpus", help="Number of virtual CPUs",
default=1, type="int", metavar="<PROC>"),
make_option("-t", "--disk-template", dest="disk_template",
help="Custom disk setup (diskless, plain, local_raid1,"
" remote_raid1 or drbd)", default=None, metavar="TEMPL"),
help="Custom disk setup (diskless, plain, or drbd)",
default=None, metavar="TEMPL"),
make_option("-i", "--ip", dest="ip",
help="IP address ('none' [default], 'auto', or specify address)",
default='none', type="string", metavar="<ADDRESS>"),
......
......@@ -131,10 +131,9 @@ class Burner(object):
help="Skip instance stop/start", action="store_false",
default=True)
parser.add_option("-t", "--disk-template", dest="disk_template",
choices=("diskless", "plain", "remote_raid1", "drbd"),
default="remote_raid1",
help="Template type for network mirroring (remote_raid1"
" or drbd) [remote_raid1]")
choices=("diskless", "plain", "drbd"),
default="drbd",
help="Disk template (diskless, plain or drbd) [drbd]")
parser.add_option("-n", "--nodes", dest="nodes", default="",
help="Comma separated list of nodes to perform"
" the burnin on (defaults to all nodes)")
......@@ -143,8 +142,8 @@ class Burner(object):
if len(args) < 1 or options.os is None:
Usage()
supported_disk_templates = (constants.DT_DISKLESS, constants.DT_PLAIN,
constants.DT_REMOTE_RAID1,
supported_disk_templates = (constants.DT_DISKLESS,
constants.DT_PLAIN,
constants.DT_DRBD8)
if options.disk_template not in supported_disk_templates:
Log("Unknown disk template '%s'" % options.disk_template)
......@@ -219,18 +218,6 @@ class Burner(object):
self.ExecOp(op)
self.to_rem.append(instance)
def ReplaceDisks1R1(self):
"""Replace disks with the same secondary for rr1."""
# replace all, both disks
for instance in self.instances:
op = opcodes.OpReplaceDisks(instance_name=instance,
remote_node=None,
mode=constants.REPLACE_DISK_ALL,
disks=["sda", "sdb"])
Log("- Replace disks for instance %s" % (instance))
self.ExecOp(op)
def ReplaceDisks1D8(self):
"""Replace disks on primary and secondary for drbd8."""
for instance in self.instances:
......@@ -243,10 +230,7 @@ class Burner(object):
def ReplaceDisks2(self):
"""Replace secondary node."""
if self.opts.disk_template == constants.DT_REMOTE_RAID1:
mode = constants.REPLACE_DISK_ALL
else:
mode = constants.REPLACE_DISK_SEC
mode = constants.REPLACE_DISK_SEC
mytor = izip(islice(cycle(self.nodes), 2, None),
self.instances)
......@@ -352,10 +336,7 @@ class Burner(object):
try:
self.CreateInstances()
if opts.do_replace1 and opts.disk_template in constants.DTS_NET_MIRROR:
if opts.disk_template == constants.DT_REMOTE_RAID1:
self.ReplaceDisks1R1()
elif opts.disk_template == constants.DT_DRBD8:
self.ReplaceDisks1D8()
self.ReplaceDisks1D8()
if (opts.do_replace2 and len(self.nodes) > 2 and
opts.disk_template in constants.DTS_NET_MIRROR) :
self.ReplaceDisks2()
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment