diff --git a/doc/design-query2.rst b/doc/design-query2.rst
index b069b26748fe4868a0904b25b9b68a6a44ee6664..8310ef1775d163c1c8281911fa96cb0dae702a5d 100644
--- a/doc/design-query2.rst
+++ b/doc/design-query2.rst
@@ -337,7 +337,7 @@ as filters. Two new calls are introduced:
   Execute a query on items, optionally filtered. Takes a single
   parameter, a :ref:`query object <data-query-request>` encoded as a
   dictionary and returns a :ref:`data query response
-  <data-query-response`.
+  <data-query-response>`.
 ``QueryFields``
   Return list of supported fields as :ref:`field definitions
   <field-def>`. Takes a single parameter, a :ref:`fields query object
diff --git a/doc/iallocator.rst b/doc/iallocator.rst
index d96671866a14c61d9e8077ef9082fe3ee12755a1..2436dd685740ad3c8debc59a7eb5426f8f648413 100644
--- a/doc/iallocator.rst
+++ b/doc/iallocator.rst
@@ -178,6 +178,8 @@ nodegroups
 
   name
     the node group name
+  alloc_policy
+    the allocation policy of the node group
 
 instances
   a dictionary with the data for the current existing instance on the
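
For illustration, a minimal sketch of what a "nodegroups" entry carrying the
new field might look like, written here as a Python literal; the group UUID
and the concrete values are hypothetical, only the two keys come from the
description above:

  # hypothetical excerpt of the iallocator input's "nodegroups" dictionary,
  # keyed by group UUID
  nodegroups = {
    "f4e06e0d-528a-4963-a5ad-10f3e114232d": {
      "name": "default",
      # allocation policy of the group; in current Ganeti this is expected
      # to be one of "preferred", "last_resort" or "unallocable"
      "alloc_policy": "preferred",
    },
  }
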
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 8bd6d649c07b3eebafa6bf8a9c5a30677ba64228..43819496ef190adb3986b06de209bed3652ccfb2 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -2405,7 +2405,7 @@ class LUClusterVerifyDisks(NoHooksLU):
     """
     result = res_nodes, res_instances, res_missing = {}, [], {}
 
-    nodes = utils.NiceSort(self.cfg.GetNodeList())
+    nodes = utils.NiceSort(self.cfg.GetVmCapableNodeList())
     instances = [self.cfg.GetInstanceInfo(name)
                  for name in self.cfg.GetInstanceList()]
 
@@ -2425,7 +2425,8 @@ class LUClusterVerifyDisks(NoHooksLU):
       return result
 
     vg_names = self.rpc.call_vg_list(nodes)
-    vg_names.Raise("Cannot get list of VGs")
+    for node in nodes:
+      vg_names[node].Raise("Cannot get list of VGs")
 
     for node in nodes:
       # node_volume
@@ -7444,12 +7445,11 @@ class LUInstanceCreate(LogicalUnit):
           raise errors.OpPrereqError("LV named %s used by another instance" %
                                      lv_name, errors.ECODE_NOTUNIQUE)
 
-      vg_names = self.rpc.call_vg_list([pnode.name])
+      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
       vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
 
       node_lvs = self.rpc.call_lv_list([pnode.name],
-                                       vg_names[pnode.name].payload.keys()
-                                      )[pnode.name]
+                                       vg_names.payload.keys())[pnode.name]
       node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
       node_lvs = node_lvs.payload
 
@@ -9253,7 +9253,7 @@ class LUInstanceSetParams(LogicalUnit):
         _CheckInstanceDown(self, instance, "cannot remove disks")
 
       if (disk_op == constants.DDM_ADD and
-          len(instance.nics) >= constants.MAX_DISKS):
+          len(instance.disks) >= constants.MAX_DISKS):
         raise errors.OpPrereqError("Instance has too many disks (%d), cannot"
                                    " add more" % constants.MAX_DISKS,
                                    errors.ECODE_STATE)
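
The common thread in the cmdlib.py hunks above is that a multi-node RPC call
returns a dictionary of per-node results, so error checking must happen on
each per-node result rather than on the dictionary itself. A minimal sketch
of the pattern with a stand-in result class (the class and data are
illustrative, not the real lib/rpc.py API):

  # Stand-in mimicking the Raise()/payload interface of per-node results.
  class FakeResult(object):
    def __init__(self, payload, error=None):
      self.payload = payload
      self.error = error

    def Raise(self, msg):
      # abort with a message if this node's call failed
      if self.error is not None:
        raise RuntimeError("%s: %s" % (msg, self.error))

  # A multi-node call returns {node_name: result}; each entry is checked
  # on its own, as LUClusterVerifyDisks now does.
  vg_lists = {"node1": FakeResult({"xenvg": 204800}),
              "node2": FakeResult({"xenvg": 409600})}
  for node in vg_lists:
    vg_lists[node].Raise("Cannot get list of VGs")

  # A single-node call can be narrowed to that node's result immediately
  # and checked once, as LUInstanceCreate does for the primary node.
  vg_names = vg_lists["node1"]
  vg_names.Raise("Cannot get VG information from node node1")
  assert "xenvg" in vg_names.payload
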
diff --git a/lib/config.py b/lib/config.py
index 6116b8da1c638d660743b49e2514636240280fad..76e6b59dd8aee87dfd6befbf19c48e9f3047950e 100644
--- a/lib/config.py
+++ b/lib/config.py
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -1297,6 +1297,15 @@ class ConfigWriter:
     """
     return self._UnlockedGetOnlineNodeList()
 
+  @locking.ssynchronized(_config_lock, shared=1)
+  def GetVmCapableNodeList(self):
+    """Return the list of nodes which are not vm capable.
+
+    """
+    all_nodes = [self._UnlockedGetNodeInfo(node)
+                 for node in self._UnlockedGetNodeList()]
+    return [node.name for node in all_nodes if node.vm_capable]
+
   @locking.ssynchronized(_config_lock, shared=1)
   def GetNonVmCapableNodeList(self):
     """Return the list of nodes which are not vm capable.
diff --git a/lib/constants.py b/lib/constants.py
index c6adfb41ced31f888b779c870f55361f741a5a11..df51ef38d4863fdbc1da94e2145ffee19d0477a9 100644
--- a/lib/constants.py
+++ b/lib/constants.py
@@ -1,7 +1,7 @@
 #
 #
 
-# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -1005,7 +1005,9 @@ RS_NORMAL = 0
 RS_UNKNOWN = 1
 #: No data (e.g. RPC error), can be used instead of L{RS_OFFLINE}
 RS_NODATA = 2
-#: Value unavailable for item
+#: Value unavailable/unsupported for item; if this field is supported
+#: but the data cannot be retrieved at the moment, RS_NODATA or
+#: RS_OFFLINE should be used instead
 RS_UNAVAIL = 3
 #: Resource marked offline
 RS_OFFLINE = 4
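
To make the updated comment concrete, a small self-contained sketch of how a
retrieval helper could pick between these statuses; the item/live_data shapes
are hypothetical, the numeric codes are the ones defined above:

  # status values as defined above in lib/constants.py
  RS_NORMAL, RS_NODATA, RS_UNAVAIL, RS_OFFLINE = 0, 2, 3, 4

  def get_field(item, live_data):
    # hypothetical helper returning a single (status, value) cell
    if not item["field_supported"]:
      # the item can never provide this field
      return (RS_UNAVAIL, None)
    if item["offline"]:
      # the item itself is marked offline
      return (RS_OFFLINE, None)
    if live_data is None:
      # the field is supported, but its data could not be fetched right now
      return (RS_NODATA, None)
    return (RS_NORMAL, live_data)

  assert get_field({"field_supported": True, "offline": False}, None) == \
      (RS_NODATA, None)
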
diff --git a/lib/query.py b/lib/query.py
index 8935f45533e613530212c92b3c70309ba1e0bd06..35f6d27ebf13c4a88bb88837622ea5628ab419e9 100644
--- a/lib/query.py
+++ b/lib/query.py
@@ -277,14 +277,18 @@ def _VerifyResultRow(fields, row):
                     (utils.CommaJoin(errors), row))
 
 
-def _PrepareFieldList(fields):
+def _PrepareFieldList(fields, aliases):
   """Prepares field list for use by L{Query}.
 
   Converts the list to a dictionary and does some verification.
 
-  @type fields: list of tuples; (L{objects.QueryFieldDefinition}, data kind,
-    retrieval function)
-  @param fields: List of fields, see L{Query.__init__} for a better description
+  @type fields: list of tuples; (L{objects.QueryFieldDefinition}, data
+      kind, retrieval function)
+  @param fields: List of fields, see L{Query.__init__} for a better
+      description
+  @type aliases: list of tuples; (alias, target)
+  @param aliases: list of tuples containing aliases; for each
+      alias/target pair, a duplicate will be created in the field list
   @rtype: dict
   @return: Field dictionary for L{Query}
 
@@ -308,7 +312,15 @@ def _PrepareFieldList(fields):
 
     result[fdef.name] = field
 
-  assert len(result) == len(fields)
+  for alias, target in aliases:
+    assert alias not in result, "Alias %s overrides an existing field" % alias
+    assert target in result, "Missing target %s for alias %s" % (target, alias)
+    (fdef, k, fn) = result[target]
+    fdef = fdef.Copy()
+    fdef.name = alias
+    result[alias] = (fdef, k, fn)
+
+  assert len(result) == len(fields) + len(aliases)
   assert compat.all(name == fdef.name
                     for (name, (fdef, _, _)) in result.items())
 
@@ -644,7 +656,7 @@ def _BuildNodeFields():
   # Add timestamps
   fields.extend(_GetItemTimestampFields(NQ_CONFIG))
 
-  return _PrepareFieldList(fields)
+  return _PrepareFieldList(fields, [])
 
 
 class InstanceQueryData:
@@ -983,10 +995,6 @@ def _GetInstanceDiskFields():
   fields = [
     (_MakeField("disk_usage", "DiskUsage", QFT_UNIT), IQ_DISKUSAGE,
      _GetInstDiskUsage),
-    (_MakeField("sda_size", "LegacyDisk/0", QFT_UNIT), IQ_CONFIG,
-     _GetInstDiskSize(0)),
-    (_MakeField("sdb_size", "LegacyDisk/1", QFT_UNIT), IQ_CONFIG,
-     _GetInstDiskSize(1)),
     (_MakeField("disk.count", "Disks", QFT_NUMBER), IQ_CONFIG,
      lambda ctx, inst: len(inst.disks)),
     (_MakeField("disk.sizes", "Disk_sizes", QFT_OTHER), IQ_CONFIG,
@@ -1034,8 +1042,6 @@ def _GetInstanceParameterFields():
      IQ_CONFIG, lambda ctx, _: ctx.inst_hvparams),
     (_MakeField("beparams", "BackendParameters", QFT_OTHER),
      IQ_CONFIG, lambda ctx, _: ctx.inst_beparams),
-    (_MakeField("vcpus", "LegacyVCPUs", QFT_NUMBER), IQ_CONFIG,
-     lambda ctx, _: ctx.inst_beparams[constants.BE_VCPUS]),
 
     # Unfilled parameters
     (_MakeField("custom_hvparams", "CustomHypervisorParameters", QFT_OTHER),
@@ -1119,7 +1125,13 @@ def _BuildInstanceFields():
   fields.extend(_GetInstanceNetworkFields())
   fields.extend(_GetItemTimestampFields(IQ_CONFIG))
 
-  return _PrepareFieldList(fields)
+  aliases = [
+    ("vcpus", "be/vcpus"),
+    ("sda_size", "disk.size/0"),
+    ("sdb_size", "disk.size/1"),
+    ]
+
+  return _PrepareFieldList(fields, aliases)
 
 
 class LockQueryData:
@@ -1175,7 +1187,7 @@ def _BuildLockFields():
      lambda ctx, (name, mode, owners, pending): mode),
     (_MakeField("owner", "Owner", QFT_OTHER), LQ_OWNER, _GetLockOwners),
     (_MakeField("pending", "Pending", QFT_OTHER), LQ_PENDING, _GetLockPending),
-    ])
+    ], [])
 
 
 class GroupQueryData:
@@ -1247,7 +1259,7 @@ def _BuildGroupFields():
 
   fields.extend(_GetItemTimestampFields(GQ_CONFIG))
 
-  return _PrepareFieldList(fields)
+  return _PrepareFieldList(fields, [])
 
 
 #: Fields available for node queries
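
A standalone sketch of what the alias handling in _PrepareFieldList boils
down to, using plain dictionaries instead of objects.QueryFieldDefinition
(names and titles are illustrative):

  # one real field plus an alias pointing at it, as with ("vcpus", "be/vcpus")
  fields = {
    "be/vcpus": ({"name": "be/vcpus", "title": "BE/VCPUs"}, None,
                 lambda ctx, inst: inst),
    }
  aliases = [("vcpus", "be/vcpus")]

  for alias, target in aliases:
    assert alias not in fields, "Alias %s overrides an existing field" % alias
    assert target in fields, "Missing target %s for alias %s" % (target, alias)
    (fdef, kind, fn) = fields[target]
    fdef = dict(fdef, name=alias)  # copy and rename, like fdef.Copy() above
    fields[alias] = (fdef, kind, fn)

  # both names now share the same data kind and retrieval function, which is
  # how the legacy "vcpus", "sda_size" and "sdb_size" names keep working
  assert fields["vcpus"][1:] == fields["be/vcpus"][1:]
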
diff --git a/qa/qa_cluster.py b/qa/qa_cluster.py
index 296f7c76379a4d5f8e4e98415c973eb78704faa7..176bcd9eb3597473f09cb58e1c452e3aa45fb75e 100644
--- a/qa/qa_cluster.py
+++ b/qa/qa_cluster.py
@@ -153,6 +153,7 @@ def TestClusterOob():
 def TestClusterVerify():
   """gnt-cluster verify"""
   AssertCommand(["gnt-cluster", "verify"])
+  AssertCommand(["gnt-cluster", "verify-disks"])
 
 
 def TestJobqueue():
diff --git a/test/ganeti.query_unittest.py b/test/ganeti.query_unittest.py
index a58a81a1e0ba505a22e74f6d4ce8793d56946981..024ff77ec7b8e08606ed01f217329018523aaa68 100755
--- a/test/ganeti.query_unittest.py
+++ b/test/ganeti.query_unittest.py
@@ -74,7 +74,7 @@ class TestQuery(unittest.TestCase):
       [(query._MakeField("disk%s.size" % i, "DiskSize%s" % i,
                          constants.QFT_UNIT),
         DISK, compat.partial(_GetDiskSize, i))
-       for i in range(4)])
+       for i in range(4)], [])
 
     q = query.Query(fielddef, ["name"])
     self.assertEqual(q.RequestedData(), set([STATIC]))
@@ -176,40 +176,40 @@ class TestQuery(unittest.TestCase):
          lambda *args: None),
         (query._MakeField("other", a, constants.QFT_TEXT), None,
          lambda *args: None),
-        ])
+        ], [])
 
     # Non-lowercase names
     self.assertRaises(AssertionError, query._PrepareFieldList, [
       (query._MakeField("NAME", "Name", constants.QFT_TEXT), None,
        lambda *args: None),
-      ])
+      ], [])
     self.assertRaises(AssertionError, query._PrepareFieldList, [
       (query._MakeField("Name", "Name", constants.QFT_TEXT), None,
        lambda *args: None),
-      ])
+      ], [])
 
     # Empty name
     self.assertRaises(AssertionError, query._PrepareFieldList, [
       (query._MakeField("", "Name", constants.QFT_TEXT), None,
        lambda *args: None),
-      ])
+      ], [])
 
     # Empty title
     self.assertRaises(AssertionError, query._PrepareFieldList, [
       (query._MakeField("name", "", constants.QFT_TEXT), None,
        lambda *args: None),
-      ])
+      ], [])
 
     # Whitespace in title
     self.assertRaises(AssertionError, query._PrepareFieldList, [
       (query._MakeField("name", "Co lu mn", constants.QFT_TEXT), None,
        lambda *args: None),
-      ])
+      ], [])
 
     # No callable function
     self.assertRaises(AssertionError, query._PrepareFieldList, [
       (query._MakeField("name", "Name", constants.QFT_TEXT), None, None),
-      ])
+      ], [])
 
   def testUnknown(self):
     fielddef = query._PrepareFieldList([
@@ -221,7 +221,7 @@ class TestQuery(unittest.TestCase):
        None, lambda *args: query._FS_NODATA ),
       (query._MakeField("unavail", "Unavail", constants.QFT_BOOL),
        None, lambda *args: query._FS_UNAVAIL),
-      ])
+      ], [])
 
     for selected in [["foo"], ["Hello", "World"],
                      ["name1", "other", "foo"]]:
@@ -254,6 +254,25 @@ class TestQuery(unittest.TestCase):
                        (constants.RS_UNKNOWN, None)]
                       for i in range(1, 10)])
 
+  def testAliases(self):
+    fields = [
+      (query._MakeField("a", "a-title", constants.QFT_TEXT), None,
+       lambda *args: None),
+      (query._MakeField("b", "b-title", constants.QFT_TEXT), None,
+       lambda *args: None),
+      ]
+    # alias shadowing an existing field, and duplicate alias names
+    self.assertRaises(AssertionError, query._PrepareFieldList, fields,
+                      [("b", "a")])
+    self.assertRaises(AssertionError, query._PrepareFieldList, fields,
+                      [("c", "b"), ("c", "a")])
+    # missing target
+    self.assertRaises(AssertionError, query._PrepareFieldList, fields,
+                      [("c", "d")])
+    fdefs = query._PrepareFieldList(fields, [("c", "b")])
+    self.assertEqual(len(fdefs), 3)
+    self.assertEqual(fdefs["b"][1:], fdefs["c"][1:])
+
 
 class TestGetNodeRole(unittest.TestCase):
   def testMaster(self):
diff --git a/tools/lvmstrap b/tools/lvmstrap
index 654548d269807b38d95b95d62487073201230c02..c27e8e3fd693e3bb86ed4b75c9804ddb14c5200e 100755
--- a/tools/lvmstrap
+++ b/tools/lvmstrap
@@ -45,6 +45,7 @@ import sys
 import optparse
 import time
 import errno
+import re
 
 from ganeti.utils import RunCmd, ReadFile
 from ganeti import constants
@@ -66,6 +67,25 @@ SUPPORTED_TYPES = [
   "ubd",
   ]
 
+#: Excluded filesystem types
+EXCLUDED_FS = frozenset([
+  "nfs",
+  "nfs4",
+  "autofs",
+  "tmpfs",
+  "proc",
+  "sysfs",
+  "usbfs",
+  "devpts",
+  ])
+
+#: A regular expression that matches partitions (must be kept in sync
+# with L{SUPPORTED_TYPES})
+PART_RE = re.compile("^((?:h|s|m|ub)d[a-z]{1,2})[0-9]+$")
+
+#: Minimum partition size to be considered (1 GB)
+PART_MINSIZE = 1024 * 1024 * 1024
+
 
 class Error(Exception):
   """Generic exception"""
@@ -185,7 +205,7 @@ def IsPartitioned(disk):
   Currently only md devices are used as is.
 
   """
-  return not disk.startswith('md')
+  return not (disk.startswith('md') or PART_RE.match(disk))
 
 
 def DeviceName(disk):
@@ -202,6 +222,17 @@ def DeviceName(disk):
   return device
 
 
+def SysfsName(disk):
+  """Returns the sysfs name for a disk or partition.
+
+  """
+  match = PART_RE.match(disk)
+  if match:
+    # this is a partition, which lives under the parent disk in /sys/block
+    disk = "%s/%s" % (match.group(1), disk)
+  return "/sys/block/%s" % disk
+
+
 def ExecCommand(command):
   """Executes a command.
 
@@ -415,7 +446,8 @@ def GetDiskList(opts):
     if not compat.any([name.startswith(pfx) for pfx in SUPPORTED_TYPES]):
       continue
 
-    size = ReadSize("/sys/block/%s" % name)
+    disksysfsname = "/sys/block/%s" % name
+    size = ReadSize(disksysfsname)
 
     f = open("/sys/block/%s/removable" % name)
     removable = int(f.read().strip())
@@ -424,18 +456,21 @@ def GetDiskList(opts):
     if removable and not opts.removable_ok:
       continue
 
-    dev = ReadDev("/sys/block/%s" % name)
+    dev = ReadDev(disksysfsname)
     CheckSysDev(name, dev)
     inuse = InUse(name)
     # Enumerate partitions of the block device
     partitions = []
-    for partname in os.listdir("/sys/block/%s" % name):
+    for partname in os.listdir(disksysfsname):
       if not partname.startswith(name):
         continue
-      partdev = ReadDev("/sys/block/%s/%s" % (name, partname))
-      partsize = ReadSize("/sys/block/%s/%s" % (name, partname))
-      CheckSysDev(partname, partdev)
-      partitions.append((partname, partsize, partdev))
+      partsysfsname = "%s/%s" % (disksysfsname, partname)
+      partdev = ReadDev(partsysfsname)
+      partsize = ReadSize(partsysfsname)
+      if partsize >= PART_MINSIZE:
+        CheckSysDev(partname, partdev)
+        partinuse = InUse(partname)
+        partitions.append((partname, partsize, partdev, partinuse))
     partitions.sort()
     dlist.append((name, size, dev, partitions, inuse))
   dlist.sort()
@@ -460,7 +495,7 @@ def GetMountInfo():
   for line in mountlines:
     _, mountpoint, fstype, _ = line.split(None, 3)
     # fs type blacklist
-    if fstype in ["nfs", "nfs4", "autofs", "tmpfs", "proc", "sysfs"]:
+    if fstype in EXCLUDED_FS:
       continue
     try:
       dev = os.stat(mountpoint).st_dev
@@ -477,6 +512,14 @@ def GetMountInfo():
   return mounts
 
 
+def GetSwapInfo():
+  """Reads /proc/swaps and returns the list of swap backing stores.
+
+  """
+  swaplines = ReadFile("/proc/swaps").splitlines()[1:]
+  return [line.split(None, 1)[0] for line in swaplines]
+
+
 def DevInfo(name, dev, mountinfo):
   """Computes miscellaneous information about a block device.
 
@@ -511,6 +554,12 @@ def ShowDiskInfo(opts):
   choice about which disks should be allocated to our volume group.
 
   """
+  def _inuse(inuse):
+    if inuse:
+      return "yes"
+    else:
+      return "no"
+
   mounts = GetMountInfo()
   dlist = GetDiskList(opts)
 
@@ -528,13 +577,9 @@ def ShowDiskInfo(opts):
   flatlist = []
   # Flatten the [(disk, [partition,...]), ...] list
   for name, size, dev, parts, inuse in dlist:
-    if inuse:
-      str_inuse = "yes"
-    else:
-      str_inuse = "no"
-    flatlist.append((name, size, dev, str_inuse))
-    for partname, partsize, partdev in parts:
-      flatlist.append((partname, partsize, partdev, ""))
+    flatlist.append((name, size, dev, _inuse(inuse)))
+    for partname, partsize, partdev, partinuse in parts:
+      flatlist.append((partname, partsize, partdev, _inuse(partinuse)))
 
   strlist = []
   for name, size, dev, in_use in flatlist:
@@ -572,7 +617,7 @@ def CheckSysfsHolders(name):
 
   """
   try:
-    contents = os.listdir("/sys/block/%s/holders/" % name)
+    contents = os.listdir("%s/holders/" % SysfsName(name))
   except OSError, err:
     if err.errno == errno.ENOENT:
       contents = []
@@ -613,11 +658,32 @@ def CheckReread(name):
     return result.failed
 
 
+def CheckMounted(name):
+  """Check to see if a block device is a mountpoint.
+
+  In recent distros/kernels, this is reported directly via fuser, but
+  on older ones not, so we do an additional check here (manually).
+
+  """
+  minfo = GetMountInfo()
+  dev = ReadDev(SysfsName(name))
+  return dev not in minfo
+
+
+def CheckSwap(name):
+  """Check to see if a block device is being used as swap.
+
+  """
+  name = "/dev/%s" % name
+  return name not in GetSwapInfo()
+
+
 def InUse(name):
   """Returns if a disk is in use or not.
 
   """
-  return not (CheckSysfsHolders(name) and CheckReread(name))
+  return not (CheckSysfsHolders(name) and CheckReread(name) and
+              CheckMounted(name) and CheckSwap(name))
 
 
 def WipeDisk(name):
@@ -739,9 +805,14 @@ def ValidateDiskList(options):
                       " non-removable block devices).")
   sysd_free = []
   sysd_used = []
-  for name, _, _, _, used in sysdisks:
+  for name, _, _, parts, used in sysdisks:
     if used:
       sysd_used.append(name)
+      for partname, _, _, partused in parts:
+        if partused:
+          sysd_used.append(partname)
+        else:
+          sysd_free.append(partname)
     else:
       sysd_free.append(name)
 
@@ -840,8 +911,8 @@ def main():
     print >> sys.stderr, str(err)
     sys.exit(1)
   except ProgrammingError, err:
-    print >> sys.stderr, ("Internal application error. Please signal this"
-                          " to xencluster-team.")
+    print >> sys.stderr, ("Internal application error. Please report this"
+                          " to the Ganeti developer list.")
     print >> sys.stderr, "Error description: %s" % str(err)
     sys.exit(1)
   except Error, err:
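
As a quick sanity check of the new partition handling, a small sketch
exercising the same regular expression and sysfs path mapping (the pattern is
reproduced locally so the snippet is self-contained):

  import re

  # same pattern as PART_RE above: the parent disk is captured in group 1,
  # trailing digits mark a partition
  part_re = re.compile("^((?:h|s|m|ub)d[a-z]{1,2})[0-9]+$")

  def sysfs_name(disk):
    # partitions live under their parent disk's directory in /sys/block
    m = part_re.match(disk)
    if m:
      disk = "%s/%s" % (m.group(1), disk)
    return "/sys/block/%s" % disk

  assert part_re.match("sdb3").group(1) == "sdb"
  assert part_re.match("sdb") is None              # whole disks do not match
  assert sysfs_name("sdb") == "/sys/block/sdb"
  assert sysfs_name("sdb3") == "/sys/block/sdb/sdb3"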