diff --git a/Makefile.am b/Makefile.am index 2b63afcad6a06978e29c8948ce7900a15804f00a..d27fd141c3547168cd05b5a0dc88408ea26ffdcb 100644 --- a/Makefile.am +++ b/Makefile.am @@ -212,9 +212,11 @@ maninput = $(patsubst %.7,%.7.in,$(patsubst %.8,%.8.in,$(man_MANS))) $(patsubst TEST_FILES = \ test/data/bdev-both.txt \ + test/data/bdev-8.3-both.txt \ test/data/bdev-disk.txt \ test/data/bdev-net.txt \ - test/data/proc_drbd8.txt + test/data/proc_drbd8.txt \ + test/data/proc_drbd83.txt dist_TESTS = \ test/ganeti.bdev_unittest.py \ @@ -235,7 +237,7 @@ nodist_TESTS = TESTS = $(dist_TESTS) $(nodist_TESTS) -TESTS_ENVIRONMENT = PYTHONPATH=.:$(top_builddir) +TESTS_ENVIRONMENT = PYTHONPATH=.:$(top_builddir) $(PYTHON) all-local: stamp-directories lib/_autoconf.py devel/upload \ doc/examples/bash_completion \ diff --git a/daemons/ganeti-masterd b/daemons/ganeti-masterd index 4c6a9681cb63fba3c1f77e1a0fe5ff9c54e33a4d..6a73e4e23ea8fbab8ac4b853be5008c44d853b70 100755 --- a/daemons/ganeti-masterd +++ b/daemons/ganeti-masterd @@ -36,7 +36,6 @@ import collections import Queue import random import signal -import simplejson import logging from cStringIO import StringIO @@ -55,6 +54,7 @@ from ganeti import ssconf from ganeti import workerpool from ganeti import rpc from ganeti import bootstrap +from ganeti import serializer CLIENT_REQUEST_WORKERS = 16 @@ -152,7 +152,7 @@ class ClientRqHandler(SocketServer.BaseRequestHandler): logging.debug("client closed connection") break - request = simplejson.loads(msg) + request = serializer.LoadJson(msg) logging.debug("request: %s", request) if not isinstance(request, dict): logging.error("wrong request received: %s", msg) @@ -181,7 +181,7 @@ class ClientRqHandler(SocketServer.BaseRequestHandler): luxi.KEY_RESULT: result, } logging.debug("response: %s", response) - self.send_message(simplejson.dumps(response)) + self.send_message(serializer.DumpJson(response)) def read_message(self): while not self._msgs: diff --git a/daemons/ganeti-noded b/daemons/ganeti-noded index ac90090845ac366cd0de093a78c7c1fa3c2d0e12..21b9b5074556db4829c377e35f8ee9dedd3d8ddf 100755 --- a/daemons/ganeti-noded +++ b/daemons/ganeti-noded @@ -722,7 +722,7 @@ def ParseOptions(): """ parser = OptionParser(description="Ganeti node daemon", - usage="%prog [-f] [-d]", + usage="%prog [-f] [-d] [-b ADDRESS]", version="%%prog (ganeti) %s" % constants.RELEASE_VERSION) @@ -732,6 +732,10 @@ def ParseOptions(): parser.add_option("-d", "--debug", dest="debug", help="Enable some debug messages", default=False, action="store_true") + parser.add_option("-b", "--bind", dest="bind_address", + help="Bind address", + default="", metavar="ADDRESS") + options, args = parser.parse_args() return options, args @@ -782,7 +786,7 @@ def main(): queue_lock = jstore.InitAndVerifyQueue(must_lock=False) mainloop = daemon.Mainloop() - server = NodeHttpServer(mainloop, "", port, + server = NodeHttpServer(mainloop, options.bind_address, port, ssl_params=ssl_params, ssl_verify_peer=True) server.Start() try: diff --git a/doc/examples/ganeti.initd.in b/doc/examples/ganeti.initd.in index 10bc684da88732f8a882f9932d6d0dbe40a11966..b346530d9001ea025c02f84ec074e063785f7feb 100644 --- a/doc/examples/ganeti.initd.in +++ b/doc/examples/ganeti.initd.in @@ -16,17 +16,22 @@ DESC="Ganeti cluster" GANETIRUNDIR="@LOCALSTATEDIR@/run/ganeti" +GANETI_DEFAULTS_FILE="@SYSCONFDIR@/default/ganeti" + NODED_NAME="ganeti-noded" NODED="@PREFIX@/sbin/${NODED_NAME}" NODED_PID="${GANETIRUNDIR}/${NODED_NAME}.pid" +NODED_ARGS="" MASTERD_NAME="ganeti-masterd" 
MASTERD="@PREFIX@/sbin/${MASTERD_NAME}" MASTERD_PID="${GANETIRUNDIR}/${MASTERD_NAME}.pid" +MASTERD_ARGS="" RAPI_NAME="ganeti-rapi" RAPI="@PREFIX@/sbin/${RAPI_NAME}" RAPI_PID="${GANETIRUNDIR}/${RAPI_NAME}.pid" +RAPI_ARGS="" SCRIPTNAME="@SYSCONFDIR@/init.d/ganeti" @@ -34,6 +39,10 @@ test -f $NODED || exit 0 . /lib/lsb/init-functions +if [ -s $GANETI_DEFAULTS_FILE ]; then + . $GANETI_DEFAULTS_FILE +fi + check_config() { for fname in \ "@LOCALSTATEDIR@/lib/ganeti/server.pem" @@ -84,16 +93,16 @@ case "$1" in start) log_daemon_msg "Starting $DESC" "$NAME" check_config - start_action $NODED $NODED_PID - start_action $MASTERD $MASTERD_PID - start_action $RAPI $RAPI_PID - ;; + start_action $NODED $NODED_PID $NODED_ARGS + start_action $MASTERD $MASTERD_PID $MASTERD_ARGS + start_action $RAPI $RAPI_PID $RAPI_ARGS + ;; stop) log_daemon_msg "Stopping $DESC" "$NAME" stop_action $RAPI $RAPI_PID stop_action $MASTERD $MASTERD_PID stop_action $NODED $NODED_PID - ;; + ;; restart|force-reload) log_daemon_msg "Reloading $DESC" stop_action $RAPI $RAPI_PID @@ -103,11 +112,11 @@ case "$1" in start_action $NODED $NODED_PID start_action $MASTERD $MASTERD_PID start_action $RAPI $RAPI_PID - ;; + ;; *) log_success_msg "Usage: $SCRIPTNAME {start|stop|force-reload|restart}" exit 1 - ;; + ;; esac exit 0 diff --git a/lib/bdev.py b/lib/bdev.py index 545cc0b5729e8baa89dbfdbee7d7cac3ee217e90..9d3f08b096e3c71fa215f1a268313fc483e1b26a 100644 --- a/lib/bdev.py +++ b/lib/bdev.py @@ -563,7 +563,7 @@ class DRBD8Status(object): """ UNCONF_RE = re.compile(r"\s*[0-9]+:\s*cs:Unconfigured$") - LINE_RE = re.compile(r"\s*[0-9]+:\s*cs:(\S+)\s+st:([^/]+)/(\S+)" + LINE_RE = re.compile(r"\s*[0-9]+:\s*cs:(\S+)\s+(?:st|ro):([^/]+)/(\S+)" "\s+ds:([^/]+)/(\S+)\s+.*$") SYNC_RE = re.compile(r"^.*\ssync'ed:\s*([0-9.]+)%.*" "\sfinish: ([0-9]+):([0-9]+):([0-9]+)\s.*$") @@ -896,15 +896,20 @@ class DRBD8(BaseDRBD): # value types value = pyp.Word(pyp.alphanums + '_-/.:') quoted = dbl_quote + pyp.CharsNotIn('"') + dbl_quote - addr_port = (pyp.Word(pyp.nums + '.') + pyp.Literal(':').suppress() + - number) + addr_type = (pyp.Optional(pyp.Literal("ipv4")).suppress() + + pyp.Optional(pyp.Literal("ipv6")).suppress()) + addr_port = (addr_type + pyp.Word(pyp.nums + '.') + + pyp.Literal(':').suppress() + number) # meta device, extended syntax meta_value = ((value ^ quoted) + pyp.Literal('[').suppress() + number + pyp.Word(']').suppress()) + # device name, extended syntax + device_value = pyp.Literal("minor").suppress() + number # a statement stmt = (~rbrace + keyword + ~lbrace + - pyp.Optional(addr_port ^ value ^ quoted ^ meta_value) + + pyp.Optional(addr_port ^ value ^ quoted ^ meta_value ^ + device_value) + pyp.Optional(defa) + semi + pyp.Optional(pyp.restOfLine).suppress()) diff --git a/lib/bootstrap.py b/lib/bootstrap.py index edee90d3c92f7eb2f9eae88096337f85ea174e31..0308484e03b81aea8739bc31598abcf359ff1ca9 100644 --- a/lib/bootstrap.py +++ b/lib/bootstrap.py @@ -25,7 +25,6 @@ import os import os.path -import sha import re import logging import tempfile diff --git a/lib/cli.py b/lib/cli.py index 6db7cea77126585f44bdd4d178a8b2eb9e189f90..17dc47c174d21617eb4b85943ec007fb27e784b0 100644 --- a/lib/cli.py +++ b/lib/cli.py @@ -815,6 +815,8 @@ def GenerateTable(headers, fields, separator, data, format = separator.replace("%", "%%").join(format_fields) for row in data: + if row is None: + continue for idx, val in enumerate(row): if unitfields.Matches(fields[idx]): try: @@ -840,6 +842,8 @@ def GenerateTable(headers, fields, separator, data, for line in data: args = 
[] + if line is None: + line = ['-' for _ in fields] for idx in xrange(len(fields)): if separator is None: args.append(mlens[idx]) diff --git a/lib/cmdlib.py b/lib/cmdlib.py index c9eae6859ae3ad7ed386b5cb7e61b2e809ecfff0..5edc7c3a364a14a8fddb8f885e1d4b426fc626af 100644 --- a/lib/cmdlib.py +++ b/lib/cmdlib.py @@ -25,7 +25,6 @@ import os import os.path -import sha import time import tempfile import re @@ -1524,8 +1523,11 @@ class LUSetClusterParams(LogicalUnit): """ if self.op.vg_name is not None: - if self.op.vg_name != self.cfg.GetVGName(): - self.cfg.SetVGName(self.op.vg_name) + new_volume = self.op.vg_name + if not new_volume: + new_volume = None + if new_volume != self.cfg.GetVGName(): + self.cfg.SetVGName(new_volume) else: feedback_fn("Cluster LVM configuration already in desired" " state, not changing") @@ -2441,6 +2443,10 @@ class LUQueryClusterInfo(NoHooksLU): for hypervisor in cluster.enabled_hypervisors]), "beparams": cluster.beparams, "candidate_pool_size": cluster.candidate_pool_size, + "default_bridge": cluster.default_bridge, + "master_netdev": cluster.master_netdev, + "volume_group_name": cluster.volume_group_name, + "file_storage_dir": cluster.file_storage_dir, } return result @@ -2755,15 +2761,48 @@ class LUStartupInstance(LogicalUnit): assert self.instance is not None, \ "Cannot retrieve locked instance %s" % self.op.instance_name + # extra beparams + self.beparams = getattr(self.op, "beparams", {}) + if self.beparams: + if not isinstance(self.beparams, dict): + raise errors.OpPrereqError("Invalid beparams passed: %s, expected" + " dict" % (type(self.beparams), )) + # fill the beparams dict + utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES) + self.op.beparams = self.beparams + + # extra hvparams + self.hvparams = getattr(self.op, "hvparams", {}) + if self.hvparams: + if not isinstance(self.hvparams, dict): + raise errors.OpPrereqError("Invalid hvparams passed: %s, expected" + " dict" % (type(self.hvparams), )) + + # check hypervisor parameter syntax (locally) + cluster = self.cfg.GetClusterInfo() + utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES) + filled_hvp = cluster.FillDict(cluster.hvparams[instance.hypervisor], + instance.hvparams) + filled_hvp.update(self.hvparams) + hv_type = hypervisor.GetHypervisor(instance.hypervisor) + hv_type.CheckParameterSyntax(filled_hvp) + _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp) + self.op.hvparams = self.hvparams + _CheckNodeOnline(self, instance.primary_node) bep = self.cfg.GetClusterInfo().FillBE(instance) # check bridges existance _CheckInstanceBridgesExist(self, instance) - _CheckNodeFreeMemory(self, instance.primary_node, - "starting instance %s" % instance.name, - bep[constants.BE_MEMORY], instance.hypervisor) + remote_info = self.rpc.call_instance_info(instance.primary_node, + instance.name, + instance.hypervisor) + remote_info.Raise() + if not remote_info.data: + _CheckNodeFreeMemory(self, instance.primary_node, + "starting instance %s" % instance.name, + bep[constants.BE_MEMORY], instance.hypervisor) def Exec(self, feedback_fn): """Start the instance. 
@@ -2778,7 +2817,8 @@ class LUStartupInstance(LogicalUnit): _StartInstanceDisks(self, instance, force) - result = self.rpc.call_instance_start(node_current, instance) + result = self.rpc.call_instance_start(node_current, instance, + self.hvparams, self.beparams) msg = result.RemoteFailMsg() if msg: _ShutdownInstanceDisks(self, instance) @@ -2860,7 +2900,7 @@ class LURebootInstance(LogicalUnit): " full reboot: %s" % msg) _ShutdownInstanceDisks(self, instance) _StartInstanceDisks(self, instance, ignore_secondaries) - result = self.rpc.call_instance_start(node_current, instance) + result = self.rpc.call_instance_start(node_current, instance, None, None) msg = result.RemoteFailMsg() if msg: _ShutdownInstanceDisks(self, instance) @@ -2960,7 +3000,8 @@ class LUReinstallInstance(LogicalUnit): remote_info = self.rpc.call_instance_info(instance.primary_node, instance.name, instance.hypervisor) - if remote_info.failed or remote_info.data: + remote_info.Raise() + if remote_info.data: raise errors.OpPrereqError("Instance '%s' is running on the node %s" % (self.op.instance_name, instance.primary_node)) @@ -3550,7 +3591,7 @@ class LUFailoverInstance(LogicalUnit): raise errors.OpExecError("Can't activate the instance's disks") feedback_fn("* starting the instance on the target node") - result = self.rpc.call_instance_start(target_node, instance) + result = self.rpc.call_instance_start(target_node, instance, None, None) msg = result.RemoteFailMsg() if msg: _ShutdownInstanceDisks(self, instance) @@ -4794,7 +4835,7 @@ class LUCreateInstance(LogicalUnit): self.cfg.Update(iobj) logging.info("Starting instance %s on node %s", instance, pnode_name) feedback_fn("* starting instance...") - result = self.rpc.call_instance_start(pnode_name, iobj) + result = self.rpc.call_instance_start(pnode_name, iobj, None, None) msg = result.RemoteFailMsg() if msg: raise errors.OpExecError("Could not start instance: %s" % msg) @@ -6242,7 +6283,7 @@ class LUExportInstance(LogicalUnit): finally: if self.op.shutdown and instance.admin_up: - result = self.rpc.call_instance_start(src_node, instance) + result = self.rpc.call_instance_start(src_node, instance, None, None) msg = result.RemoteFailMsg() if msg: _ShutdownInstanceDisks(self, instance) diff --git a/lib/constants.py b/lib/constants.py index 7ab360dadd855885b0641404daac1725f4c0cb4f..e5a210c4d5e2503d71387df77ac74ee118c77581 100644 --- a/lib/constants.py +++ b/lib/constants.py @@ -358,7 +358,7 @@ VNC_BASE_PORT = 5900 VNC_PASSWORD_FILE = _autoconf.SYSCONFDIR + "/ganeti/vnc-cluster-password" VNC_DEFAULT_BIND_ADDRESS = '0.0.0.0' -# Device types +# NIC types HT_NIC_RTL8139 = "rtl8139" HT_NIC_NE2K_PCI = "ne2k_pci" HT_NIC_NE2K_ISA = "ne2k_isa" @@ -368,25 +368,40 @@ HT_NIC_I8259ER = "i82559er" HT_NIC_PCNET = "pcnet" HT_NIC_E1000 = "e1000" HT_NIC_PARAVIRTUAL = HT_DISK_PARAVIRTUAL = "paravirtual" -HT_DISK_IOEMU = "ioemu" -HT_DISK_IDE = "ide" -HT_DISK_SCSI = "scsi" -HT_DISK_SD = "sd" -HT_DISK_MTD = "mtd" -HT_DISK_PFLASH = "pflash" HT_HVM_VALID_NIC_TYPES = frozenset([HT_NIC_RTL8139, HT_NIC_NE2K_PCI, HT_NIC_NE2K_ISA, HT_NIC_PARAVIRTUAL]) -HT_HVM_VALID_DISK_TYPES = frozenset([HT_DISK_PARAVIRTUAL, HT_DISK_IOEMU]) HT_KVM_VALID_NIC_TYPES = frozenset([HT_NIC_RTL8139, HT_NIC_NE2K_PCI, HT_NIC_NE2K_ISA, HT_NIC_I82551, HT_NIC_I85557B, HT_NIC_I8259ER, HT_NIC_PCNET, HT_NIC_E1000, HT_NIC_PARAVIRTUAL]) +# Disk types +HT_DISK_IOEMU = "ioemu" +HT_DISK_IDE = "ide" +HT_DISK_SCSI = "scsi" +HT_DISK_SD = "sd" +HT_DISK_MTD = "mtd" +HT_DISK_PFLASH = "pflash" + +HT_HVM_VALID_DISK_TYPES = 
frozenset([HT_DISK_PARAVIRTUAL, HT_DISK_IOEMU]) HT_KVM_VALID_DISK_TYPES = frozenset([HT_DISK_PARAVIRTUAL, HT_DISK_IDE, HT_DISK_SCSI, HT_DISK_SD, HT_DISK_MTD, HT_DISK_PFLASH]) +# Mouse types: +HT_MOUSE_MOUSE = "mouse" +HT_MOUSE_TABLET = "tablet" + +HT_KVM_VALID_MOUSE_TYPES = frozenset([HT_MOUSE_MOUSE, HT_MOUSE_TABLET]) + +# Boot order +HT_BO_CDROM = "cdrom" +HT_BO_DISK = "disk" +HT_BO_NETWORK = "network" + +HT_KVM_VALID_BO_TYPES = frozenset([HT_BO_CDROM, HT_BO_DISK, HT_BO_NETWORK]) + # Cluster Verify steps VERIFY_NPLUSONE_MEM = 'nplusone_mem' VERIFY_OPTIONAL_CHECKS = frozenset([VERIFY_NPLUSONE_MEM]) @@ -505,7 +520,7 @@ HVC_DEFAULTS = { HV_VNC_X509: '', HV_VNC_X509_VERIFY: False, HV_CDROM_IMAGE_PATH: '', - HV_BOOT_ORDER: "disk", + HV_BOOT_ORDER: HT_BO_DISK, HV_NIC_TYPE: HT_NIC_PARAVIRTUAL, HV_DISK_TYPE: HT_DISK_PARAVIRTUAL, HV_USB_MOUSE: '', diff --git a/lib/hypervisor/hv_base.py b/lib/hypervisor/hv_base.py index b094b37dfbf7dba6bfc3c45f073fb619d431c7e4..e5de1b7cc09423f391a61b246dacfb48d0dbb0a4 100644 --- a/lib/hypervisor/hv_base.py +++ b/lib/hypervisor/hv_base.py @@ -23,6 +23,9 @@ """ +import re + + from ganeti import errors @@ -187,3 +190,64 @@ class BaseHypervisor(object): """ pass + + def GetLinuxNodeInfo(self): + """For linux systems, return actual OS information. + + This is an abstraction for all non-hypervisor-based classes, where + the node actually sees all the memory and CPUs via the /proc + interface and standard commands. The other case if for example + xen, where you only see the hardware resources via xen-specific + tools. + + @return: a dict with the following keys (values in MiB): + - memory_total: the total memory size on the node + - memory_free: the available memory on the node for instances + - memory_dom0: the memory used by the node itself, if available + + """ + try: + fh = file("/proc/meminfo") + try: + data = fh.readlines() + finally: + fh.close() + except EnvironmentError, err: + raise errors.HypervisorError("Failed to list node info: %s" % (err,)) + + result = {} + sum_free = 0 + try: + for line in data: + splitfields = line.split(":", 1) + + if len(splitfields) > 1: + key = splitfields[0].strip() + val = splitfields[1].strip() + if key == 'MemTotal': + result['memory_total'] = int(val.split()[0])/1024 + elif key in ('MemFree', 'Buffers', 'Cached'): + sum_free += int(val.split()[0])/1024 + elif key == 'Active': + result['memory_dom0'] = int(val.split()[0])/1024 + except (ValueError, TypeError), err: + raise errors.HypervisorError("Failed to compute memory usage: %s" % + (err,)) + result['memory_free'] = sum_free + + cpu_total = 0 + try: + fh = open("/proc/cpuinfo") + try: + cpu_total = len(re.findall("(?m)^processor\s*:\s*[0-9]+\s*$", + fh.read())) + finally: + fh.close() + except EnvironmentError, err: + raise errors.HypervisorError("Failed to list node info: %s" % (err,)) + result['cpu_total'] = cpu_total + # FIXME: export correct data here + result['cpu_nodes'] = 1 + result['cpu_sockets'] = 1 + + return result diff --git a/lib/hypervisor/hv_fake.py b/lib/hypervisor/hv_fake.py index 48e645bf7a493075782a957079feaea2525da8ea..ccac8427730d419579e141910fba4f204f95b449 100644 --- a/lib/hypervisor/hv_fake.py +++ b/lib/hypervisor/hv_fake.py @@ -155,61 +155,19 @@ class FakeHypervisor(hv_base.BaseHypervisor): def GetNodeInfo(self): """Return information about the node. + This is just a wrapper over the base GetLinuxNodeInfo method. 
+ @return: a dict with the following keys (values in MiB): - memory_total: the total memory size on the node - memory_free: the available memory on the node for instances - memory_dom0: the memory used by the node itself, if available """ - # global ram usage from the xm info command - # memory : 3583 - # free_memory : 747 - # note: in xen 3, memory has changed to total_memory - try: - fh = file("/proc/meminfo") - try: - data = fh.readlines() - finally: - fh.close() - except IOError, err: - raise errors.HypervisorError("Failed to list node info: %s" % err) - - result = {} - sum_free = 0 - for line in data: - splitfields = line.split(":", 1) - - if len(splitfields) > 1: - key = splitfields[0].strip() - val = splitfields[1].strip() - if key == 'MemTotal': - result['memory_total'] = int(val.split()[0])/1024 - elif key in ('MemFree', 'Buffers', 'Cached'): - sum_free += int(val.split()[0])/1024 - elif key == 'Active': - result['memory_dom0'] = int(val.split()[0])/1024 - result['memory_free'] = sum_free - + result = self.GetLinuxNodeInfo() # substract running instances all_instances = self.GetAllInstancesInfo() result['memory_free'] -= min(result['memory_free'], sum([row[2] for row in all_instances])) - - cpu_total = 0 - try: - fh = open("/proc/cpuinfo") - try: - cpu_total = len(re.findall("(?m)^processor\s*:\s*[0-9]+\s*$", - fh.read())) - finally: - fh.close() - except EnvironmentError, err: - raise errors.HypervisorError("Failed to list node info: %s" % err) - result['cpu_total'] = cpu_total - # FIXME: export correct data here - result['cpu_nodes'] = 1 - result['cpu_sockets'] = 1 - return result @classmethod diff --git a/lib/hypervisor/hv_kvm.py b/lib/hypervisor/hv_kvm.py index aeb21a07d72daa6a796be1bbf899e9fa72531d4e..3b9a0dfe287bf16149fa0ebfc91f4af8d42c48d2 100644 --- a/lib/hypervisor/hv_kvm.py +++ b/lib/hypervisor/hv_kvm.py @@ -246,9 +246,9 @@ class KVMHypervisor(hv_base.BaseHypervisor): kvm_cmd.extend(['-no-acpi']) hvp = instance.hvparams - boot_disk = hvp[constants.HV_BOOT_ORDER] == "disk" - boot_cdrom = hvp[constants.HV_BOOT_ORDER] == "cdrom" - boot_network = hvp[constants.HV_BOOT_ORDER] == "network" + boot_disk = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_DISK + boot_cdrom = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_CDROM + boot_network = hvp[constants.HV_BOOT_ORDER] == constants.HT_BO_NETWORK if boot_network: kvm_cmd.extend(['-boot', 'n']) @@ -622,57 +622,15 @@ class KVMHypervisor(hv_base.BaseHypervisor): def GetNodeInfo(self): """Return information about the node. + This is just a wrapper over the base GetLinuxNodeInfo method. 
+ @return: a dict with the following keys (values in MiB): - memory_total: the total memory size on the node - memory_free: the available memory on the node for instances - memory_dom0: the memory used by the node itself, if available """ - # global ram usage from the xm info command - # memory : 3583 - # free_memory : 747 - # note: in xen 3, memory has changed to total_memory - try: - fh = file("/proc/meminfo") - try: - data = fh.readlines() - finally: - fh.close() - except EnvironmentError, err: - raise errors.HypervisorError("Failed to list node info: %s" % err) - - result = {} - sum_free = 0 - for line in data: - splitfields = line.split(":", 1) - - if len(splitfields) > 1: - key = splitfields[0].strip() - val = splitfields[1].strip() - if key == 'MemTotal': - result['memory_total'] = int(val.split()[0])/1024 - elif key in ('MemFree', 'Buffers', 'Cached'): - sum_free += int(val.split()[0])/1024 - elif key == 'Active': - result['memory_dom0'] = int(val.split()[0])/1024 - result['memory_free'] = sum_free - - cpu_total = 0 - try: - fh = open("/proc/cpuinfo") - try: - cpu_total = len(re.findall("(?m)^processor\s*:\s*[0-9]+\s*$", - fh.read())) - finally: - fh.close() - except EnvironmentError, err: - raise errors.HypervisorError("Failed to list node info: %s" % err) - result['cpu_total'] = cpu_total - # FIXME: export correct data here - result['cpu_nodes'] = 1 - result['cpu_sockets'] = 1 - - return result + return self.GetLinuxNodeInfo() @classmethod def GetShellCommandForConsole(cls, instance, hvparams, beparams): @@ -769,35 +727,39 @@ class KVMHypervisor(hv_base.BaseHypervisor): " an absolute path, if defined") boot_order = hvparams[constants.HV_BOOT_ORDER] - if boot_order not in ('cdrom', 'disk', 'network'): - raise errors.HypervisorError("The boot order must be 'cdrom', 'disk' or" - " 'network'") + if boot_order not in constants.HT_KVM_VALID_BO_TYPES: + raise errors.HypervisorError(\ + "The boot order must be one of %s" % + utils.CommaJoin(constants.HT_KVM_VALID_BO_TYPES)) - if boot_order == 'cdrom' and not iso_path: - raise errors.HypervisorError("Cannot boot from cdrom without an ISO path") + if boot_order == constants.HT_BO_CDROM and not iso_path: + raise errors.HypervisorError("Cannot boot from cdrom without an" + " ISO path") nic_type = hvparams[constants.HV_NIC_TYPE] if nic_type not in constants.HT_KVM_VALID_NIC_TYPES: - raise errors.HypervisorError("Invalid NIC type %s specified for the KVM" - " hypervisor. Please choose one of: %s" % - (nic_type, - constants.HT_KVM_VALID_NIC_TYPES)) - elif boot_order == 'network' and nic_type == constants.HT_NIC_PARAVIRTUAL: + raise errors.HypervisorError(\ + "Invalid NIC type %s specified for the KVM" + " hypervisor. Please choose one of: %s" % + (nic_type, utils.CommaJoin(constants.HT_KVM_VALID_NIC_TYPES))) + elif (boot_order == constants.HT_BO_NETWORK and + nic_type == constants.HT_NIC_PARAVIRTUAL): raise errors.HypervisorError("Cannot boot from a paravirtual NIC. Please" - " change the nic type.") + " change the NIC type.") disk_type = hvparams[constants.HV_DISK_TYPE] if disk_type not in constants.HT_KVM_VALID_DISK_TYPES: - raise errors.HypervisorError("Invalid disk type %s specified for the KVM" - " hypervisor. Please choose one of: %s" % - (disk_type, - constants.HT_KVM_VALID_DISK_TYPES)) + raise errors.HypervisorError(\ + "Invalid disk type %s specified for the KVM" + " hypervisor. 
Please choose one of: %s" %
+        (disk_type, utils.CommaJoin(constants.HT_KVM_VALID_DISK_TYPES)))
 
     mouse_type = hvparams[constants.HV_USB_MOUSE]
-    if mouse_type and mouse_type not in ('mouse', 'tablet'):
-      raise errors.HypervisorError("Invalid usb mouse type %s specified for"
-                                   " the KVM hyervisor. Please choose"
-                                   " 'mouse' or 'tablet'" % mouse_type)
+    if mouse_type and mouse_type not in constants.HT_KVM_VALID_MOUSE_TYPES:
+      raise errors.HypervisorError(\
+        "Invalid usb mouse type %s specified for the KVM hypervisor. Please"
+        " choose one of %s" %
+        (mouse_type, utils.CommaJoin(constants.HT_KVM_VALID_MOUSE_TYPES)))
 
   def ValidateParameters(self, hvparams):
     """Check the given parameters for validity.
diff --git a/lib/hypervisor/hv_xen.py b/lib/hypervisor/hv_xen.py
index 9d6ae60565258c759337e493bcb5e66e3d19ab61..9779e51b2ce486044ce9ca6172557cdff86559cc 100644
--- a/lib/hypervisor/hv_xen.py
+++ b/lib/hypervisor/hv_xen.py
@@ -537,16 +537,16 @@ class XenHvmHypervisor(XenHypervisor):
     # device type checks
     nic_type = hvparams[constants.HV_NIC_TYPE]
     if nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
-      raise errors.HypervisorError("Invalid NIC type %s specified for the Xen"
-                                   " HVM hypervisor. Please choose one of: %s"
-                                   % (nic_type,
-                                      constants.HT_HVM_VALID_NIC_TYPES))
+      raise errors.HypervisorError(\
+        "Invalid NIC type %s specified for the Xen"
+        " HVM hypervisor. Please choose one of: %s"
+        % (nic_type, utils.CommaJoin(constants.HT_HVM_VALID_NIC_TYPES)))
     disk_type = hvparams[constants.HV_DISK_TYPE]
     if disk_type not in constants.HT_HVM_VALID_DISK_TYPES:
-      raise errors.HypervisorError("Invalid disk type %s specified for the Xen"
-                                   " HVM hypervisor. Please choose one of: %s"
-                                   % (disk_type,
-                                      constants.HT_HVM_VALID_DISK_TYPES))
+      raise errors.HypervisorError(\
+        "Invalid disk type %s specified for the Xen"
+        " HVM hypervisor. Please choose one of: %s"
+        % (disk_type, utils.CommaJoin(constants.HT_HVM_VALID_DISK_TYPES)))
     # vnc_bind_address verification
     vnc_bind_address = hvparams[constants.HV_VNC_BIND_ADDRESS]
     if vnc_bind_address:
diff --git a/lib/opcodes.py b/lib/opcodes.py
index 5be660bdc797b9b2e219b443b9367509f26c1051..535db910d0b38fba6c055dc24c0ed0ea1f943117 100644
--- a/lib/opcodes.py
+++ b/lib/opcodes.py
@@ -382,7 +382,7 @@ class OpStartupInstance(OpCode):
   """Startup an instance."""
   OP_ID = "OP_INSTANCE_STARTUP"
   OP_DSC_FIELD = "instance_name"
-  __slots__ = ["instance_name", "force"]
+  __slots__ = ["instance_name", "force", "hvparams", "beparams"]
 
 
 class OpShutdownInstance(OpCode):
diff --git a/lib/rpc.py b/lib/rpc.py
index 599de0ac966726202663a0c4008cabeeb4aba872..11cdb1f18562af94a2d6d1a697c77b0667251ba4 100644
--- a/lib/rpc.py
+++ b/lib/rpc.py
@@ -260,7 +260,7 @@ class RpcRunner(object):
     self._cfg = cfg
     self.port = utils.GetNodeDaemonPort()
 
-  def _InstDict(self, instance):
+  def _InstDict(self, instance, hvp=None, bep=None):
     """Convert the given instance to a dict.
 
     This is done via the instance's ToDict() method and additionally
@@ -268,6 +268,10 @@ class RpcRunner(object):
 
     @type instance: L{objects.Instance}
    @param instance: an Instance object
+    @type hvp: dict or None
+    @param hvp: a dictionary with overridden hypervisor parameters
+    @type bep: dict or None
+    @param bep: a dictionary with overridden backend parameters
     @rtype: dict
     @return: the instance dict, with the hvparams filled with the
         cluster defaults
@@ -276,7 +280,11 @@
     idict = instance.ToDict()
     cluster = self._cfg.GetClusterInfo()
     idict["hvparams"] = cluster.FillHV(instance)
+    if hvp is not None:
+      idict["hvparams"].update(hvp)
     idict["beparams"] = cluster.FillBE(instance)
+    if bep is not None:
+      idict["beparams"].update(bep)
     return idict
 
   def _ConnectList(self, client, node_list, call):
@@ -425,14 +433,14 @@
     """
     return self._SingleNodeCall(node, "bridges_exist", [bridges_list])
 
-  def call_instance_start(self, node, instance):
+  def call_instance_start(self, node, instance, hvp, bep):
     """Starts an instance.
 
     This is a single-node call.
 
     """
-    return self._SingleNodeCall(node, "instance_start",
-                                [self._InstDict(instance)])
+    idict = self._InstDict(instance, hvp=hvp, bep=bep)
+    return self._SingleNodeCall(node, "instance_start", [idict])
 
   def call_instance_shutdown(self, node, instance):
     """Stops an instance.
diff --git a/lib/utils.py b/lib/utils.py
index 304877933020d271e251b06d05f9be8055fcea7a..e876e2c3e6433273e520d1766e192321b19e2f0c 100644
--- a/lib/utils.py
+++ b/lib/utils.py
@@ -156,11 +156,18 @@ def RunCmd(cmd, env=None, output=None, cwd='/'):
   if env is not None:
     cmd_env.update(env)
 
-  if output is None:
-    out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd)
-  else:
-    status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
-    out = err = ""
+  try:
+    if output is None:
+      out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd)
+    else:
+      status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
+      out = err = ""
+  except OSError, err:
+    if err.errno == errno.ENOENT:
+      raise errors.OpExecError("Can't execute '%s': not found (%s)" %
+                               (strcmd, err))
+    else:
+      raise
 
   if status >= 0:
     exitcode = status
@@ -1289,6 +1296,7 @@
 
   dir_name, base_name = os.path.split(file_name)
   fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
+  do_remove = True
   # here we need to make sure we remove the temp file, if any error
   # leaves it in place
   try:
@@ -1309,13 +1317,15 @@
       os.utime(new_name, (atime, mtime))
     if not dry_run:
       os.rename(new_name, file_name)
+      do_remove = False
   finally:
     if close:
       os.close(fd)
       result = None
     else:
       result = fd
-    RemoveFile(new_name)
+    if do_remove:
+      RemoveFile(new_name)
 
   return result
 
@@ -1823,6 +1833,16 @@ def SafeEncode(text):
 
   return text
 
+def CommaJoin(names):
+  """Nicely join a set of identifiers.
+
+  @param names: set, list or tuple
+  @return: a string with the formatted results
+
+  """
+  return ", ".join(["'%s'" % val for val in names])
+
+
 def LockedMethod(fn):
   """Synchronized object access decorator.
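# Editor's note: a minimal, illustrative sketch (not part of the patch) of the
# override order introduced by the _InstDict()/call_instance_start() changes
# above: cluster defaults are overlaid with the instance's own parameters, and
# the temporary values passed via "gnt-instance start -H/-B" (hvp/bep) win
# last, for that start only.  merge_start_params() and the sample dictionaries
# below are hypothetical helpers, not part of the Ganeti API.

def merge_start_params(cluster_defaults, instance_params, temporary_params):
  """Return the effective parameter dict for a one-off instance start."""
  effective = dict(cluster_defaults)
  effective.update(instance_params)
  if temporary_params is not None:
    effective.update(temporary_params)
  return effective

# Example with made-up values:
#   merge_start_params({"boot_order": "disk", "acpi": True},
#                      {"acpi": False},
#                      {"boot_order": "cdrom"})
# returns {"boot_order": "cdrom", "acpi": False}; the temporary -H/-B values
# are only passed over RPC for this start and are never written to the
# cluster configuration.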
diff --git a/man/gnt-debug.sgml b/man/gnt-debug.sgml
index 2e624b053c8c334dd07b0c531d3f528453a0194a..89fd9e4a1f3aecc63e36d2e9a7864f5c352781b3 100644
--- a/man/gnt-debug.sgml
+++ b/man/gnt-debug.sgml
@@ -146,12 +146,12 @@
 
     <cmdsynopsis>
       <command>submit-job</command>
-      <arg choice="req">opcodes_file</arg>
+      <arg choice="req" rep="repeat">opcodes_file</arg>
     </cmdsynopsis>
 
     <para>
-      This command builds a list of opcodes from a JSON-format file
-      and submits them as a single job to the master daemon. It can
+      This command builds a list of opcodes from each JSON-format file
+      and submits one job per file to the master daemon. It can
       be used to test some options that are not available via the
       command line.
     </para>
diff --git a/man/gnt-instance.sgml b/man/gnt-instance.sgml
index 176f70c0d737cac032e1bbe3f9d21be11b4a45ae..edf267eecac141efb276cbcf64f7983ce783218b 100644
--- a/man/gnt-instance.sgml
+++ b/man/gnt-instance.sgml
@@ -1127,7 +1127,7 @@ instance5: 11225
     <para>
       Show detailed information about the given instance(s). This is
       different from <command>list</command> as it shows detailed data
-      about the instance's disks (especially useful for the drbd disk 
+      about the instance's disks (especially useful for the drbd disk
       template).
     </para>
 
@@ -1314,6 +1314,9 @@ instance5: 11225
       <arg>--all</arg>
     </group>
     <sbr>
+    <arg>-H <option>key=value...</option></arg>
+    <arg>-B <option>key=value...</option></arg>
+    <sbr>
     <arg>--submit</arg>
     <sbr>
     <arg choice="opt"
@@ -1382,6 +1385,23 @@ instance5: 11225
       instance will be affected.
     </para>
 
+    <para>
+      The <option>-H</option> and <option>-B</option> options
+      specify extra, temporary hypervisor and backend parameters
+      that are applied only to this particular start of the
+      instance. They are useful for quick testing without having
+      to modify an instance back and forth, e.g.:
+      <screen>
+# gnt-instance start -H root_args="single" instance1
+# gnt-instance start -B memory=2048 instance2
+      </screen>
+      The first form starts the instance
+      <userinput>instance1</userinput> in single-user mode, and
+      the second starts <userinput>instance2</userinput> with 2GB
+      of RAM for this start only (unless 2048MB is already the
+      instance's configured memory size).
+    </para>
+
     <para>
       The <option>--submit</option> option is used to send the
       job to the master daemon but not wait for its completion.
The job diff --git a/scripts/gnt-cluster b/scripts/gnt-cluster index c8473fa16cb26bae2a52ecb3cd3c8efbbc7e651b..99cab31f28a71dbf7ae616dfb24bbf7d9c899cc8 100755 --- a/scripts/gnt-cluster +++ b/scripts/gnt-cluster @@ -243,6 +243,10 @@ def ShowClusterConfig(opts, args): ToStdout("Cluster parameters:") ToStdout(" - candidate pool size: %s", result["candidate_pool_size"]) + ToStdout(" - master netdev: %s", result["master_netdev"]) + ToStdout(" - default bridge: %s", result["default_bridge"]) + ToStdout(" - lvm volume group: %s", result["volume_group_name"]) + ToStdout(" - file storage path: %s", result["file_storage_dir"]) ToStdout("Default instance parameters:") for gr_name, gr_dict in result["beparams"].items(): @@ -463,6 +467,8 @@ def SetClusterParams(opts, args): if not opts.lvm_storage and opts.vg_name: ToStdout("Options --no-lvm-storage and --vg-name conflict.") return 1 + elif not opts.lvm_storage: + vg_name = '' hvlist = opts.enabled_hypervisors if hvlist is not None: @@ -476,7 +482,7 @@ def SetClusterParams(opts, args): beparams = opts.beparams utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES) - op = opcodes.OpSetClusterParams(vg_name=opts.vg_name, + op = opcodes.OpSetClusterParams(vg_name=vg_name, enabled_hypervisors=hvlist, hvparams=hvparams, beparams=beparams, diff --git a/scripts/gnt-debug b/scripts/gnt-debug index ff7e01b307474d5085c8e48440cf6f8418eefcea..d3bf05450c3c08cc6ad94de10d744dbc4d3273cd 100755 --- a/scripts/gnt-debug +++ b/scripts/gnt-debug @@ -71,12 +71,19 @@ def GenericOpCodes(opts, args): """ cl = cli.GetClient() - fname = args[0] - op_data = simplejson.loads(open(fname).read()) - op_list = [opcodes.OpCode.LoadOpCode(val) for val in op_data] - jid = cli.SendJob(op_list, cl=cl) - ToStdout("Job id: %s", jid) - cli.PollJob(jid, cl=cl) + job_data = [] + job_ids = [] + for fname in args: + op_data = simplejson.loads(open(fname).read()) + op_list = [opcodes.OpCode.LoadOpCode(val) for val in op_data] + job_data.append((fname, op_list)) + for fname, op_list in job_data: + jid = cli.SendJob(op_list, cl=cl) + ToStdout("File '%s', job id: %s", fname, jid) + job_ids.append(jid) + for jid in job_ids: + ToStdout("Waiting for job id %s", jid) + cli.PollJob(jid, cl=cl) return 0 @@ -139,11 +146,11 @@ commands = { help="Select nodes to sleep on"), ], "[opts...] 
<duration>", "Executes a TestDelay OpCode"), - 'submit-job': (GenericOpCodes, ARGS_ONE, + 'submit-job': (GenericOpCodes, ARGS_ATLEAST(1), [DEBUG_OPT, ], - "<op_list_file>", "Submits a job built from a json-file" - " with a list of serialized opcodes"), + "<op_list_file...>", "Submits jobs built from json files" + " containing a list of serialized opcodes"), 'allocator': (TestAllocator, ARGS_ONE, [DEBUG_OPT, make_option("--dir", dest="direction", diff --git a/scripts/gnt-instance b/scripts/gnt-instance index 62a1f038e59f88a74e6e3ec07c08384d228e37a9..7b93e3bc150b53eb6663cb5e5560a05fb6055822 100755 --- a/scripts/gnt-instance +++ b/scripts/gnt-instance @@ -710,6 +710,11 @@ def StartupInstance(opts, args): for name in inames: op = opcodes.OpStartupInstance(instance_name=name, force=opts.force) + # do not add these parameters to the opcode unless they're defined + if opts.hvparams: + op.hvparams = opts.hvparams + if opts.beparams: + op.beparams = opts.beparams jex.QueueJob(name, op) jex.WaitOrShow(not opts.submit_only) return 0 @@ -1472,8 +1477,14 @@ commands = { m_node_opt, m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_inst_opt, SUBMIT_OPT, + keyval_option("-H", "--hypervisor", type="keyval", + default={}, dest="hvparams", + help="Temporary hypervisor parameters"), + keyval_option("-B", "--backend", type="keyval", + default={}, dest="beparams", + help="Temporary backend parameters"), ], - "<instance>", "Starts an instance"), + "<instance>", "Starts an instance"), 'reboot': (RebootInstance, ARGS_ANY, [DEBUG_OPT, m_force_multi, diff --git a/scripts/gnt-job b/scripts/gnt-job index ce81709e15e2984d8689ea652cb574a0c23ea805..2da75a35b1da77b5ccf864de88ccfbc2aa0d95d3 100755 --- a/scripts/gnt-job +++ b/scripts/gnt-job @@ -85,7 +85,11 @@ def ListJobs(opts, args): headers = None # change raw values to nicer strings - for row in output: + for row_id, row in enumerate(output): + if row is None: + ToStderr("No such job: %s" % args[row_id]) + continue + for idx, field in enumerate(selected_fields): val = row[idx] if field == "status": diff --git a/test/data/bdev-8.3-both.txt b/test/data/bdev-8.3-both.txt new file mode 100644 index 0000000000000000000000000000000000000000..bc6e741cda853fe10ee910aa66170bcd4ab2308d --- /dev/null +++ b/test/data/bdev-8.3-both.txt @@ -0,0 +1,36 @@ +disk { + size 0s _is_default; # bytes + on-io-error detach; + fencing dont-care _is_default; + max-bio-bvecs 0 _is_default; +} +net { + timeout 60 _is_default; # 1/10 seconds + max-epoch-size 2048 _is_default; + max-buffers 2048 _is_default; + unplug-watermark 128 _is_default; + connect-int 10 _is_default; # seconds + ping-int 10 _is_default; # seconds + sndbuf-size 131070 _is_default; # bytes + ko-count 0 _is_default; + after-sb-0pri discard-zero-changes; + after-sb-1pri consensus; + after-sb-2pri disconnect _is_default; + rr-conflict disconnect _is_default; + ping-timeout 5 _is_default; # 1/10 seconds +} +syncer { + rate 61440k; # bytes/second + after -1 _is_default; + al-extents 257; +} +protocol C; +_this_host { + device minor 0; + disk "/dev/xenvg/test.data"; + meta-disk "/dev/xenvg/test.meta" [ 0 ]; + address ipv4 192.168.1.1:11000; +} +_remote_host { + address ipv4 192.168.1.2:11000; +} diff --git a/test/data/proc_drbd83.txt b/test/data/proc_drbd83.txt new file mode 100644 index 0000000000000000000000000000000000000000..114944c723814c8d59a9617d61f2370d59795b50 Binary files /dev/null and b/test/data/proc_drbd83.txt differ diff --git a/test/ganeti.bdev_unittest.py b/test/ganeti.bdev_unittest.py index 
2db785c7d27daecdc6c5cf7ab32f7678be115ac1..b2299b18d481eed28302323743072481ddd6a529 100755 --- a/test/ganeti.bdev_unittest.py +++ b/test/ganeti.bdev_unittest.py @@ -61,7 +61,7 @@ class TestDRBD8Runner(testutils.GanetiTestCase): """Test drbdsetup show parser creation""" bdev.DRBD8._GetShowParser() - def testParserBoth(self): + def testParserBoth80(self): """Test drbdsetup show parser for disk and network""" data = self._ReadTestData("bdev-both.txt") result = bdev.DRBD8._GetDevInfo(data) @@ -70,7 +70,18 @@ class TestDRBD8Runner(testutils.GanetiTestCase): "Wrong local disk info") self.failUnless(self._has_net(result, ("192.168.1.1", 11000), ("192.168.1.2", 11000)), - "Wrong network info") + "Wrong network info (8.0.x)") + + def testParserBoth83(self): + """Test drbdsetup show parser for disk and network""" + data = self._ReadTestData("bdev-8.3-both.txt") + result = bdev.DRBD8._GetDevInfo(data) + self.failUnless(self._has_disk(result, "/dev/xenvg/test.data", + "/dev/xenvg/test.meta"), + "Wrong local disk info") + self.failUnless(self._has_net(result, ("192.168.1.1", 11000), + ("192.168.1.2", 11000)), + "Wrong network info (8.2.x)") def testParserNet(self): """Test drbdsetup show parser for disk and network""" @@ -103,8 +114,11 @@ class TestDRBD8Status(testutils.GanetiTestCase): """Read in txt data""" testutils.GanetiTestCase.setUp(self) proc_data = self._TestDataFilename("proc_drbd8.txt") + proc83_data = self._TestDataFilename("proc_drbd83.txt") self.proc_data = bdev.DRBD8._GetProcData(filename=proc_data) + self.proc83_data = bdev.DRBD8._GetProcData(filename=proc83_data) self.mass_data = bdev.DRBD8._MassageProcData(self.proc_data) + self.mass83_data = bdev.DRBD8._MassageProcData(self.proc83_data) def testIOErrors(self): """Test handling of errors while reading the proc file.""" @@ -116,6 +130,7 @@ class TestDRBD8Status(testutils.GanetiTestCase): def testMinorNotFound(self): """Test not-found-minor in /proc""" self.failUnless(9 not in self.mass_data) + self.failUnless(9 not in self.mass83_data) def testLineNotMatch(self): """Test wrong line passed to DRBD8Status""" @@ -123,45 +138,51 @@ class TestDRBD8Status(testutils.GanetiTestCase): def testMinor0(self): """Test connected, primary device""" - stats = bdev.DRBD8Status(self.mass_data[0]) - self.failUnless(stats.is_in_use) - self.failUnless(stats.is_connected and stats.is_primary and - stats.peer_secondary and stats.is_disk_uptodate) + for data in [self.mass_data, self.mass83_data]: + stats = bdev.DRBD8Status(data[0]) + self.failUnless(stats.is_in_use) + self.failUnless(stats.is_connected and stats.is_primary and + stats.peer_secondary and stats.is_disk_uptodate) def testMinor1(self): """Test connected, secondary device""" - stats = bdev.DRBD8Status(self.mass_data[1]) - self.failUnless(stats.is_in_use) - self.failUnless(stats.is_connected and stats.is_secondary and - stats.peer_primary and stats.is_disk_uptodate) + for data in [self.mass_data, self.mass83_data]: + stats = bdev.DRBD8Status(data[1]) + self.failUnless(stats.is_in_use) + self.failUnless(stats.is_connected and stats.is_secondary and + stats.peer_primary and stats.is_disk_uptodate) def testMinor2(self): """Test unconfigured device""" - stats = bdev.DRBD8Status(self.mass_data[2]) - self.failIf(stats.is_in_use) + for data in [self.mass_data, self.mass83_data]: + stats = bdev.DRBD8Status(data[2]) + self.failIf(stats.is_in_use) def testMinor4(self): """Test WFconn device""" - stats = bdev.DRBD8Status(self.mass_data[4]) - self.failUnless(stats.is_in_use) - self.failUnless(stats.is_wfconn 
and stats.is_primary and - stats.rrole == 'Unknown' and - stats.is_disk_uptodate) + for data in [self.mass_data, self.mass83_data]: + stats = bdev.DRBD8Status(data[4]) + self.failUnless(stats.is_in_use) + self.failUnless(stats.is_wfconn and stats.is_primary and + stats.rrole == 'Unknown' and + stats.is_disk_uptodate) def testMinor6(self): """Test diskless device""" - stats = bdev.DRBD8Status(self.mass_data[6]) - self.failUnless(stats.is_in_use) - self.failUnless(stats.is_connected and stats.is_secondary and - stats.peer_primary and stats.is_diskless) + for data in [self.mass_data, self.mass83_data]: + stats = bdev.DRBD8Status(data[6]) + self.failUnless(stats.is_in_use) + self.failUnless(stats.is_connected and stats.is_secondary and + stats.peer_primary and stats.is_diskless) def testMinor8(self): """Test standalone device""" - stats = bdev.DRBD8Status(self.mass_data[8]) - self.failUnless(stats.is_in_use) - self.failUnless(stats.is_standalone and - stats.rrole == 'Unknown' and - stats.is_disk_uptodate) + for data in [self.mass_data, self.mass83_data]: + stats = bdev.DRBD8Status(data[8]) + self.failUnless(stats.is_in_use) + self.failUnless(stats.is_standalone and + stats.rrole == 'Unknown' and + stats.is_disk_uptodate) if __name__ == '__main__': unittest.main()
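# Editor's note: a small, self-contained illustration (not part of the patch)
# of why DRBD8Status.LINE_RE above now accepts both "st:" and "ro:": DRBD
# 8.0.x labels the node roles with "st:" in /proc/drbd, while the 8.3 series
# covered by the new proc_drbd83.txt test data uses "ro:".  The two sample
# lines below are illustrative only, not copied from the test data.

import re

LINE_RE = re.compile(r"\s*[0-9]+:\s*cs:(\S+)\s+(?:st|ro):([^/]+)/(\S+)"
                     r"\s+ds:([^/]+)/(\S+)\s+.*$")

SAMPLE_80 = " 0: cs:Connected st:Primary/Secondary ds:UpToDate/UpToDate C r---"
SAMPLE_83 = " 0: cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate C r----"

for line in (SAMPLE_80, SAMPLE_83):
  m = LINE_RE.match(line)
  # both formats yield the same five groups: connection state, local role,
  # remote role, local disk state, remote disk state
  assert m is not None
  assert m.group(1) == "Connected" and m.group(2) == "Primary"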