Commit e18c6c47 authored by Iustin Pop

Merge branch 'devel-2.4'



* devel-2.4:
  Another fix for LUClusterVerifyDisks
  QA: also run gnt-cluster verify-disks
  Fix disk adoption breakage
  Fix typo in query2 design document
  Improve documentation for QRFS_UNAVAIL
  lvmstrap: add PV-on-partition support
  lvmstrap: abstract a little the sysfs paths
  lvmstrap: ignore small-sized partitions
  lvmstrap: add explicit test for swap backends
  lvmstrap: add an explicit test for mounted devices
  lvmstrap: add more excluded FS types
  lvmstrap: fix very old contact information
  Instance query: replace duplicates with aliases
  query: Add alias support in _PrepareFieldList
  Fix disk count check in LUSetInstanceParams
  Document iallocator change (alloc_policy)

Conflicts:
        lib/constants.py (due to QRFS→RS on master and doc on 2.4)
Signed-off-by: Iustin Pop <iustin@google.com>
Reviewed-by: René Nussbaumer <rn@google.com>
parents cfb084ae 075b62ca
@@ -337,7 +337,7 @@ as filters. Two new calls are introduced:
   Execute a query on items, optionally filtered. Takes a single
   parameter, a :ref:`query object <data-query-request>` encoded as a
   dictionary and returns a :ref:`data query response
-  <data-query-response`.
+  <data-query-response>`.
 ``QueryFields``
   Return list of supported fields as :ref:`field definitions
   <field-def>`. Takes a single parameter, a :ref:`fields query object
...
@@ -178,6 +178,8 @@ nodegroups
   name
     the node group name
+  alloc_policy
+    the allocation policy of the node group

 instances
   a dictionary with the data for the current existing instance on the
...
@@ -2405,7 +2405,7 @@ class LUClusterVerifyDisks(NoHooksLU):
     """
     result = res_nodes, res_instances, res_missing = {}, [], {}

-    nodes = utils.NiceSort(self.cfg.GetNodeList())
+    nodes = utils.NiceSort(self.cfg.GetVmCapableNodeList())
     instances = [self.cfg.GetInstanceInfo(name)
                  for name in self.cfg.GetInstanceList()]
@@ -2425,7 +2425,8 @@ class LUClusterVerifyDisks(NoHooksLU):
       return result

     vg_names = self.rpc.call_vg_list(nodes)
-    vg_names.Raise("Cannot get list of VGs")
+    for node in nodes:
+      vg_names[node].Raise("Cannot get list of VGs")

     for node in nodes:
       # node_volume
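The hunk above and the disk-adoption fix below share one root cause: a multi-node RPC call returns a mapping of node name to per-node result, so Raise() has to be called on each entry (or on one indexed entry), never on the dict itself. A minimal illustrative sketch with stand-in classes, not Ganeti's actual rpc module:

class FakeRpcResult(object):
  """Stand-in for a per-node RPC result."""
  def __init__(self, payload, error=None):
    self.payload = payload
    self.error = error

  def Raise(self, msg):
    # abort with a message if this particular node's call failed
    if self.error:
      raise RuntimeError("%s: %s" % (msg, self.error))

# call_vg_list-style calls return {node_name: result}; the dict itself
# has no Raise() method, hence the per-node loop in the fix above
vg_names = {
  "node1.example.com": FakeRpcResult({"xenvg": 409600}),
  "node2.example.com": FakeRpcResult(None, error="node unreachable"),
}

try:
  for node in sorted(vg_names):
    vg_names[node].Raise("Cannot get list of VGs")
except RuntimeError as err:
  print(err)  # Cannot get list of VGs: node unreachable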
@@ -7444,12 +7445,11 @@ class LUInstanceCreate(LogicalUnit):
           raise errors.OpPrereqError("LV named %s used by another instance" %
                                      lv_name, errors.ECODE_NOTUNIQUE)

-      vg_names = self.rpc.call_vg_list([pnode.name])
+      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
       vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

       node_lvs = self.rpc.call_lv_list([pnode.name],
-                                       vg_names[pnode.name].payload.keys()
-                                       )[pnode.name]
+                                       vg_names.payload.keys())[pnode.name]
       node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
       node_lvs = node_lvs.payload
@@ -9253,7 +9253,7 @@ class LUInstanceSetParams(LogicalUnit):
         _CheckInstanceDown(self, instance, "cannot remove disks")

       if (disk_op == constants.DDM_ADD and
-          len(instance.nics) >= constants.MAX_DISKS):
+          len(instance.disks) >= constants.MAX_DISKS):
         raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
                                    " add more" % constants.MAX_DISKS,
                                    errors.ECODE_STATE)
...
 #
 #
-# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -1297,6 +1297,15 @@ class ConfigWriter:
     """
     return self._UnlockedGetOnlineNodeList()

+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetVmCapableNodeList(self):
+    """Return the list of nodes which are vm capable.
+
+    """
+    all_nodes = [self._UnlockedGetNodeInfo(node)
+                 for node in self._UnlockedGetNodeList()]
+    return [node.name for node in all_nodes if node.vm_capable]
+
   @locking.ssynchronized(_config_lock, shared=1)
   def GetNonVmCapableNodeList(self):
     """Return the list of nodes which are not vm capable.
...
 #
 #
-# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -1005,7 +1005,9 @@ RS_NORMAL = 0
 RS_UNKNOWN = 1
 #: No data (e.g. RPC error), can be used instead of L{RS_OFFLINE}
 RS_NODATA = 2
-#: Value unavailable for item
+#: Value unavailable/unsupported for item; if this field is supported
+#: but we cannot get the data for the moment, RS_NODATA or
+#: RS_OFFLINE should be used
 RS_UNAVAIL = 3
 #: Resource marked offline
 RS_OFFLINE = 4
...
@@ -277,14 +277,18 @@ def _VerifyResultRow(fields, row):
                          (utils.CommaJoin(errors), row))


-def _PrepareFieldList(fields):
+def _PrepareFieldList(fields, aliases):
   """Prepares field list for use by L{Query}.

   Converts the list to a dictionary and does some verification.

-  @type fields: list of tuples; (L{objects.QueryFieldDefinition}, data kind,
-    retrieval function)
-  @param fields: List of fields, see L{Query.__init__} for a better description
+  @type fields: list of tuples; (L{objects.QueryFieldDefinition}, data
+    kind, retrieval function)
+  @param fields: List of fields, see L{Query.__init__} for a better
+    description
+  @type aliases: list of tuples; (alias, target)
+  @param aliases: list of tuples containing aliases; for each
+    alias/target pair, a duplicate will be created in the field list
   @rtype: dict
   @return: Field dictionary for L{Query}
@@ -308,7 +312,15 @@ def _PrepareFieldList(fields):
     result[fdef.name] = field

-  assert len(result) == len(fields)
+  for alias, target in aliases:
+    assert alias not in result, "Alias %s overrides an existing field" % alias
+    assert target in result, "Missing target %s for alias %s" % (target, alias)
+    (fdef, k, fn) = result[target]
+    fdef = fdef.Copy()
+    fdef.name = alias
+    result[alias] = (fdef, k, fn)
+
+  assert len(result) == len(fields) + len(aliases)

   assert compat.all(name == fdef.name
                     for (name, (fdef, _, _)) in result.items())
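The alias loop above simply clones the target's definition under a new name, so both names end up sharing the same data kind and retrieval function. A simplified stand-alone sketch of the same idea, using plain dicts instead of objects.QueryFieldDefinition:

import copy

def expand_aliases(result, aliases):
  # result maps field name -> (field definition dict, data kind, retrieval fn)
  for alias, target in aliases:
    assert alias not in result, "Alias %s overrides an existing field" % alias
    assert target in result, "Missing target %s for alias %s" % (target, alias)
    fdef, kind, fn = result[target]
    fdef = copy.copy(fdef)   # same title and type, only the name changes
    fdef["name"] = alias
    result[alias] = (fdef, kind, fn)

fields = {"be/vcpus": ({"name": "be/vcpus", "title": "BE/VCPUs"}, "config",
                       lambda ctx, inst: inst["vcpus"])}
expand_aliases(fields, [("vcpus", "be/vcpus")])
assert fields["vcpus"][1:] == fields["be/vcpus"][1:]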
@@ -644,7 +656,7 @@ def _BuildNodeFields():
   # Add timestamps
   fields.extend(_GetItemTimestampFields(NQ_CONFIG))

-  return _PrepareFieldList(fields)
+  return _PrepareFieldList(fields, [])


 class InstanceQueryData:
@@ -983,10 +995,6 @@ def _GetInstanceDiskFields():
   fields = [
     (_MakeField("disk_usage", "DiskUsage", QFT_UNIT), IQ_DISKUSAGE,
      _GetInstDiskUsage),
-    (_MakeField("sda_size", "LegacyDisk/0", QFT_UNIT), IQ_CONFIG,
-     _GetInstDiskSize(0)),
-    (_MakeField("sdb_size", "LegacyDisk/1", QFT_UNIT), IQ_CONFIG,
-     _GetInstDiskSize(1)),
     (_MakeField("disk.count", "Disks", QFT_NUMBER), IQ_CONFIG,
      lambda ctx, inst: len(inst.disks)),
     (_MakeField("disk.sizes", "Disk_sizes", QFT_OTHER), IQ_CONFIG,
@@ -1034,8 +1042,6 @@ def _GetInstanceParameterFields():
      IQ_CONFIG, lambda ctx, _: ctx.inst_hvparams),
     (_MakeField("beparams", "BackendParameters", QFT_OTHER),
      IQ_CONFIG, lambda ctx, _: ctx.inst_beparams),
-    (_MakeField("vcpus", "LegacyVCPUs", QFT_NUMBER), IQ_CONFIG,
-     lambda ctx, _: ctx.inst_beparams[constants.BE_VCPUS]),

     # Unfilled parameters
     (_MakeField("custom_hvparams", "CustomHypervisorParameters", QFT_OTHER),
@@ -1119,7 +1125,13 @@ def _BuildInstanceFields():
   fields.extend(_GetInstanceNetworkFields())
   fields.extend(_GetItemTimestampFields(IQ_CONFIG))

-  return _PrepareFieldList(fields)
+  aliases = [
+    ("vcpus", "be/vcpus"),
+    ("sda_size", "disk.size/0"),
+    ("sdb_size", "disk.size/1"),
+    ]
+  return _PrepareFieldList(fields, aliases)


 class LockQueryData:
@@ -1175,7 +1187,7 @@ def _BuildLockFields():
      lambda ctx, (name, mode, owners, pending): mode),
     (_MakeField("owner", "Owner", QFT_OTHER), LQ_OWNER, _GetLockOwners),
     (_MakeField("pending", "Pending", QFT_OTHER), LQ_PENDING, _GetLockPending),
-    ])
+    ], [])


 class GroupQueryData:
@@ -1247,7 +1259,7 @@ def _BuildGroupFields():
   fields.extend(_GetItemTimestampFields(GQ_CONFIG))

-  return _PrepareFieldList(fields)
+  return _PrepareFieldList(fields, [])


 #: Fields available for node queries
...
@@ -153,6 +153,7 @@ def TestClusterOob():
 def TestClusterVerify():
   """gnt-cluster verify"""
   AssertCommand(["gnt-cluster", "verify"])
+  AssertCommand(["gnt-cluster", "verify-disks"])


 def TestJobqueue():
...
@@ -74,7 +74,7 @@ class TestQuery(unittest.TestCase):
       [(query._MakeField("disk%s.size" % i, "DiskSize%s" % i,
                          constants.QFT_UNIT),
         DISK, compat.partial(_GetDiskSize, i))
-       for i in range(4)])
+       for i in range(4)], [])

     q = query.Query(fielddef, ["name"])
     self.assertEqual(q.RequestedData(), set([STATIC]))
@@ -176,40 +176,40 @@ class TestQuery(unittest.TestCase):
        lambda *args: None),
       (query._MakeField("other", a, constants.QFT_TEXT), None,
        lambda *args: None),
-      ])
+      ], [])

     # Non-lowercase names
     self.assertRaises(AssertionError, query._PrepareFieldList, [
       (query._MakeField("NAME", "Name", constants.QFT_TEXT), None,
        lambda *args: None),
-      ])
+      ], [])
     self.assertRaises(AssertionError, query._PrepareFieldList, [
       (query._MakeField("Name", "Name", constants.QFT_TEXT), None,
        lambda *args: None),
-      ])
+      ], [])

     # Empty name
     self.assertRaises(AssertionError, query._PrepareFieldList, [
       (query._MakeField("", "Name", constants.QFT_TEXT), None,
        lambda *args: None),
-      ])
+      ], [])

     # Empty title
     self.assertRaises(AssertionError, query._PrepareFieldList, [
       (query._MakeField("name", "", constants.QFT_TEXT), None,
        lambda *args: None),
-      ])
+      ], [])

     # Whitespace in title
     self.assertRaises(AssertionError, query._PrepareFieldList, [
       (query._MakeField("name", "Co lu mn", constants.QFT_TEXT), None,
        lambda *args: None),
-      ])
+      ], [])

     # No callable function
     self.assertRaises(AssertionError, query._PrepareFieldList, [
       (query._MakeField("name", "Name", constants.QFT_TEXT), None, None),
-      ])
+      ], [])

   def testUnknown(self):
     fielddef = query._PrepareFieldList([
@@ -221,7 +221,7 @@ class TestQuery(unittest.TestCase):
        None, lambda *args: query._FS_NODATA ),
       (query._MakeField("unavail", "Unavail", constants.QFT_BOOL),
        None, lambda *args: query._FS_UNAVAIL),
-      ])
+      ], [])

     for selected in [["foo"], ["Hello", "World"],
                      ["name1", "other", "foo"]]:
@@ -254,6 +254,25 @@ class TestQuery(unittest.TestCase):
                        (constants.RS_UNKNOWN, None)]
                       for i in range(1, 10)])

+  def testAliases(self):
+    fields = [
+      (query._MakeField("a", "a-title", constants.QFT_TEXT), None,
+       lambda *args: None),
+      (query._MakeField("b", "b-title", constants.QFT_TEXT), None,
+       lambda *args: None),
+      ]
+    # duplicate field
+    self.assertRaises(AssertionError, query._PrepareFieldList, fields,
+                      [("b", "a")])
+    self.assertRaises(AssertionError, query._PrepareFieldList, fields,
+                      [("c", "b"), ("c", "a")])
+    # missing target
+    self.assertRaises(AssertionError, query._PrepareFieldList, fields,
+                      [("c", "d")])
+    fdefs = query._PrepareFieldList(fields, [("c", "b")])
+    self.assertEqual(len(fdefs), 3)
+    self.assertEqual(fdefs["b"][1:], fdefs["c"][1:])
+

 class TestGetNodeRole(unittest.TestCase):
   def testMaster(self):
...
@@ -45,6 +45,7 @@ import sys
 import optparse
 import time
 import errno
+import re

 from ganeti.utils import RunCmd, ReadFile
 from ganeti import constants
@@ -66,6 +67,25 @@ SUPPORTED_TYPES = [
   "ubd",
   ]

+#: Excluded filesystem types
+EXCLUDED_FS = frozenset([
+  "nfs",
+  "nfs4",
+  "autofs",
+  "tmpfs",
+  "proc",
+  "sysfs",
+  "usbfs",
+  "devpts",
+  ])
+
+#: A regular expression that matches partitions (must be kept in sync
+# with L{SUPPORTED_TYPES})
+PART_RE = re.compile("^((?:h|s|m|ub)d[a-z]{1,2})[0-9]+$")
+
+#: Minimum partition size to be considered (1 GB)
+PART_MINSIZE = 1024 * 1024 * 1024
+

 class Error(Exception):
   """Generic exception"""
@@ -185,7 +205,7 @@ def IsPartitioned(disk):
   Currently only md devices are used as is.

   """
-  return not disk.startswith('md')
+  return not (disk.startswith('md') or PART_RE.match(disk))


 def DeviceName(disk):
@@ -202,6 +222,17 @@ def DeviceName(disk):
   return device


+def SysfsName(disk):
+  """Returns the sysfs name for a disk or partition.
+
+  """
+  match = PART_RE.match(disk)
+  if match:
+    # this is a partition, which resides in /sys/block under a different name
+    disk = "%s/%s" % (match.group(1), disk)
+  return "/sys/block/%s" % disk
+
+
 def ExecCommand(command):
   """Executes a command.
@@ -415,7 +446,8 @@ def GetDiskList(opts):
     if not compat.any([name.startswith(pfx) for pfx in SUPPORTED_TYPES]):
       continue

-    size = ReadSize("/sys/block/%s" % name)
+    disksysfsname = "/sys/block/%s" % name
+    size = ReadSize(disksysfsname)

     f = open("/sys/block/%s/removable" % name)
     removable = int(f.read().strip())
@@ -424,18 +456,21 @@ def GetDiskList(opts):
     if removable and not opts.removable_ok:
      continue
-    dev = ReadDev("/sys/block/%s" % name)
+    dev = ReadDev(disksysfsname)
     CheckSysDev(name, dev)
     inuse = InUse(name)

     # Enumerate partitions of the block device
     partitions = []
-    for partname in os.listdir("/sys/block/%s" % name):
+    for partname in os.listdir(disksysfsname):
       if not partname.startswith(name):
         continue
-      partdev = ReadDev("/sys/block/%s/%s" % (name, partname))
-      partsize = ReadSize("/sys/block/%s/%s" % (name, partname))
-      CheckSysDev(partname, partdev)
-      partitions.append((partname, partsize, partdev))
+      partsysfsname = "%s/%s" % (disksysfsname, partname)
+      partdev = ReadDev(partsysfsname)
+      partsize = ReadSize(partsysfsname)
+      if partsize >= PART_MINSIZE:
+        CheckSysDev(partname, partdev)
+        partinuse = InUse(partname)
+        partitions.append((partname, partsize, partdev, partinuse))
     partitions.sort()
     dlist.append((name, size, dev, partitions, inuse))
   dlist.sort()
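With this change every partition entry also records whether it is in use, and partitions smaller than PART_MINSIZE are skipped entirely. The resulting structure looks roughly like this (all names, sizes and device numbers are made-up illustrations):

# (disk_name, disk_size, disk_dev, partitions, disk_inuse), where every
# partition tuple is now (part_name, part_size, part_dev, part_inuse)
dlist = [
  ("sda", 500 * 1024 ** 3, 0x800,
   [("sda1", 2 * 1024 ** 3, 0x801, True),      # >= PART_MINSIZE, kept
    ("sda2", 498 * 1024 ** 3, 0x802, False)],  # free for PV creation
   True),
]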
@@ -460,7 +495,7 @@ def GetMountInfo():
   for line in mountlines:
     _, mountpoint, fstype, _ = line.split(None, 3)
     # fs type blacklist
-    if fstype in ["nfs", "nfs4", "autofs", "tmpfs", "proc", "sysfs"]:
+    if fstype in EXCLUDED_FS:
       continue
     try:
       dev = os.stat(mountpoint).st_dev
@@ -477,6 +512,14 @@ def GetMountInfo():
   return mounts


+def GetSwapInfo():
+  """Reads /proc/swaps and returns the list of swap backing stores.
+
+  """
+  swaplines = ReadFile("/proc/swaps").splitlines()[1:]
+  return [line.split(None, 1)[0] for line in swaplines]
+
+
 def DevInfo(name, dev, mountinfo):
   """Computes miscellaneous information about a block device.
@@ -511,6 +554,12 @@ def ShowDiskInfo(opts):
   choice about which disks should be allocated to our volume group.

   """
+  def _inuse(inuse):
+    if inuse:
+      return "yes"
+    else:
+      return "no"
+
   mounts = GetMountInfo()
   dlist = GetDiskList(opts)
@@ -528,13 +577,9 @@