diff --git a/doc/examples/dumb-allocator b/doc/examples/dumb-allocator
index d7e94868febc92afe62a97376a9a19a9f8d12bcc..d181b69b55db267f4556889eb5355ef41f73da96 100755
--- a/doc/examples/dumb-allocator
+++ b/doc/examples/dumb-allocator
@@ -74,12 +74,13 @@ def main():
   nodes =  data["nodes"]
   request = data["request"]
   req_type = request["type"]
+  offline_nodes = [name for name in nodes if nodes[name]["offline"]]
   if req_type == "allocate":
-    forbidden_nodes = []
+    forbidden_nodes = offline_nodes
     inst_data = request
   elif req_type == "relocate":
     idict = data["instances"][request["name"]]
-    forbidden_nodes = idict["nodes"]
+    forbidden_nodes = idict["nodes"] + offline_nodes
     inst_data = idict
     inst_data["disk_space_total"] = request["disk_space_total"]
   else:
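
Annotation (not part of the patch): the hunk above makes the shipped dumb-allocator example skip offline nodes for both allocation and relocation requests. As a minimal sketch of the same idea, here is how a stand-alone iallocator-style plugin could derive the set of candidate nodes from the JSON input. It assumes only the "nodes", "request", "instances" and per-node "offline" keys visible in the example; the shipped example uses simplejson, plain json is used here for brevity.

    #!/usr/bin/env python
    # Illustrative sketch only: filter out offline and otherwise forbidden
    # nodes from an iallocator input file, mirroring the change above.
    import json
    import sys

    def candidate_nodes(data):
      """Return node names that are online and allowed for the request."""
      nodes = data["nodes"]
      request = data["request"]
      offline = set(name for name in nodes if nodes[name].get("offline"))
      if request["type"] == "relocate":
        # never relocate onto a node the instance already uses
        current = set(data["instances"][request["name"]]["nodes"])
      else:
        current = set()
      return [name for name in nodes if name not in offline | current]

    if __name__ == "__main__":
      print(json.dumps(candidate_nodes(json.load(open(sys.argv[1])))))
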
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index e337b686ae760aea09c578bdb78753646dff2fa1..e54c17962e6e30d97d8bad3f8d34ea6490ba6d1d 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -6374,7 +6374,7 @@ class IAllocator(object):
       "version": 1,
       "cluster_name": cfg.GetClusterName(),
       "cluster_tags": list(cluster_info.GetTags()),
-      "enable_hypervisors": list(cluster_info.enabled_hypervisors),
+      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
       # we don't have job IDs
       }
     iinfo = cfg.GetAllInstancesInfo().values()
@@ -6393,52 +6393,60 @@ class IAllocator(object):
                                            hypervisor_name)
     node_iinfo = self.lu.rpc.call_all_instances_info(node_list,
                        cluster_info.enabled_hypervisors)
-    for nname in node_list:
+    for nname, nresult in node_data.items():
+      # first fill in static (config-based) values
       ninfo = cfg.GetNodeInfo(nname)
-      node_data[nname].Raise()
-      if not isinstance(node_data[nname].data, dict):
-        raise errors.OpExecError("Can't get data for node %s" % nname)
-      remote_info = node_data[nname].data
-      for attr in ['memory_total', 'memory_free', 'memory_dom0',
-                   'vg_size', 'vg_free', 'cpu_total']:
-        if attr not in remote_info:
-          raise errors.OpExecError("Node '%s' didn't return attribute '%s'" %
-                                   (nname, attr))
-        try:
-          remote_info[attr] = int(remote_info[attr])
-        except ValueError, err:
-          raise errors.OpExecError("Node '%s' returned invalid value for '%s':"
-                                   " %s" % (nname, attr, str(err)))
-      # compute memory used by primary instances
-      i_p_mem = i_p_up_mem = 0
-      for iinfo, beinfo in i_list:
-        if iinfo.primary_node == nname:
-          i_p_mem += beinfo[constants.BE_MEMORY]
-          if iinfo.name not in node_iinfo[nname]:
-            i_used_mem = 0
-          else:
-            i_used_mem = int(node_iinfo[nname][iinfo.name]['memory'])
-          i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
-          remote_info['memory_free'] -= max(0, i_mem_diff)
-
-          if iinfo.admin_up:
-            i_p_up_mem += beinfo[constants.BE_MEMORY]
-
-      # compute memory used by instances
       pnr = {
         "tags": list(ninfo.GetTags()),
-        "total_memory": remote_info['memory_total'],
-        "reserved_memory": remote_info['memory_dom0'],
-        "free_memory": remote_info['memory_free'],
-        "i_pri_memory": i_p_mem,
-        "i_pri_up_memory": i_p_up_mem,
-        "total_disk": remote_info['vg_size'],
-        "free_disk": remote_info['vg_free'],
         "primary_ip": ninfo.primary_ip,
         "secondary_ip": ninfo.secondary_ip,
-        "total_cpus": remote_info['cpu_total'],
         "offline": ninfo.offline,
+        "master_candidate": ninfo.master_candidate,
         }
+
+      if not ninfo.offline:
+        nresult.Raise()
+        if not isinstance(nresult.data, dict):
+          raise errors.OpExecError("Can't get data for node %s" % nname)
+        remote_info = nresult.data
+        for attr in ['memory_total', 'memory_free', 'memory_dom0',
+                     'vg_size', 'vg_free', 'cpu_total']:
+          if attr not in remote_info:
+            raise errors.OpExecError("Node '%s' didn't return attribute"
+                                     " '%s'" % (nname, attr))
+          try:
+            remote_info[attr] = int(remote_info[attr])
+          except ValueError, err:
+            raise errors.OpExecError("Node '%s' returned invalid value"
+                                     " for '%s': %s" % (nname, attr, err))
+        # compute memory used by primary instances
+        i_p_mem = i_p_up_mem = 0
+        for iinfo, beinfo in i_list:
+          if iinfo.primary_node == nname:
+            i_p_mem += beinfo[constants.BE_MEMORY]
+            if iinfo.name not in node_iinfo[nname].data:
+              i_used_mem = 0
+            else:
+              i_used_mem = int(node_iinfo[nname].data[iinfo.name]['memory'])
+            i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
+            remote_info['memory_free'] -= max(0, i_mem_diff)
+
+            if iinfo.admin_up:
+              i_p_up_mem += beinfo[constants.BE_MEMORY]
+
+        # collect the dynamic (RPC-derived) node information
+        pnr_dyn = {
+          "total_memory": remote_info['memory_total'],
+          "reserved_memory": remote_info['memory_dom0'],
+          "free_memory": remote_info['memory_free'],
+          "total_disk": remote_info['vg_size'],
+          "free_disk": remote_info['vg_free'],
+          "total_cpus": remote_info['cpu_total'],
+          "i_pri_memory": i_p_mem,
+          "i_pri_up_memory": i_p_up_mem,
+          }
+        pnr.update(pnr_dyn)
+
       node_results[nname] = pnr
     data["nodes"] = node_results
 
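Annotation (not part of the patch): after this hunk every node entry in the iallocator input always carries the static, configuration-derived keys ("tags", "primary_ip", "secondary_ip", "offline", "master_candidate"), while the RPC-derived keys in pnr_dyn ("total_memory", "free_memory", "total_disk", "total_cpus" and so on) are only filled in for online nodes. A plugin therefore has to guard its dynamic lookups; the helpers below are an illustrative sketch, not part of Ganeti.

    # Illustrative sketch: consuming the per-node dicts built above, where
    # offline nodes lack the dynamic (RPC-based) keys such as "free_memory".

    def online_nodes(nodes):
      """Yield (name, info) pairs for nodes that carry dynamic data."""
      for name, info in nodes.items():
        if not info["offline"]:
          yield name, info

    def free_memory(info):
      """Return free memory for one node, or None if it is offline."""
      if info["offline"]:
        # dynamic keys are not present for offline nodes
        return None
      return info["free_memory"]
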
@@ -6449,13 +6457,13 @@ class IAllocator(object):
                   for n in iinfo.nics]
       pir = {
         "tags": list(iinfo.GetTags()),
-        "should_run": iinfo.admin_up,
+        "admin_up": iinfo.admin_up,
         "vcpus": beinfo[constants.BE_VCPUS],
         "memory": beinfo[constants.BE_MEMORY],
         "os": iinfo.os,
-        "nodes": list(iinfo.all_nodes),
+        "nodes": [iinfo.primary_node] + list(iinfo.secondary_nodes),
         "nics": nic_data,
-        "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
+        "disks": [{"size": dsk.size, "mode": dsk.mode} for dsk in iinfo.disks],
         "disk_template": iinfo.disk_template,
         "hypervisor": iinfo.hypervisor,
         }
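Annotation (not part of the patch): the last hunk renames "should_run" to "admin_up" in the per-instance records, exports the primary node first in the "nodes" list, and passes each disk's real access mode instead of a hard-coded "w". Below is an illustrative record with made-up values (only the key names come from the patch) and a trivial consumer.

    # Illustrative only: shape of one instance record ("pir") after this
    # change; the values below are invented for the example.
    sample_instance = {
      "tags": [],
      "admin_up": True,            # formerly exported as "should_run"
      "vcpus": 1,
      "memory": 512,
      "os": "debootstrap",
      "nodes": ["node1.example.com", "node2.example.com"],  # primary first
      "nics": [],
      "disks": [{"size": 10240, "mode": "rw"}],  # real mode, not always "w"
      "disk_template": "drbd",
      "hypervisor": "xen-pvm",
      }

    def total_disk(instance):
      """Sum the disk sizes (in MiB) of one instance record."""
      return sum(disk["size"] for disk in instance["disks"])

    print(total_disk(sample_instance))   # -> 10240
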
diff --git a/scripts/gnt-backup b/scripts/gnt-backup
index 0cb6abf9cbce6a1fd2f8a927bb4145bc9b55f493..b0a046c5d2bdcf6bb5de05d143169987969c839b 100755
--- a/scripts/gnt-backup
+++ b/scripts/gnt-backup
@@ -208,7 +208,7 @@ import_opts = [
   make_option("--no-ip-check", dest="ip_check", default=True,
               action="store_false", help="Don't check that the instance's IP"
               " is alive"),
-  make_option("--iallocator", metavar="<NAME>",
+  make_option("-I", "--iallocator", metavar="<NAME>",
               help="Select nodes for the instance automatically using the"
               " <NAME> iallocator plugin", default=None, type="string"),
   make_option("--file-storage-dir", dest="file_storage_dir",
diff --git a/scripts/gnt-instance b/scripts/gnt-instance
index 369ae347d6c6ccf2ae0638f0f05a14673d0baa54..959990ad9f054f60e46185ef2ef2e1d2d9213993 100755
--- a/scripts/gnt-instance
+++ b/scripts/gnt-instance
@@ -1268,7 +1268,7 @@ add_opts = [
               metavar="<DIR>"),
   make_option("--file-driver", dest="file_driver", help="Driver to use"
               " for image files", default="loop", metavar="<DRIVER>"),
-  make_option("--iallocator", metavar="<NAME>",
+  make_option("-I", "--iallocator", metavar="<NAME>",
               help="Select nodes for the instance automatically using the"
               " <NAME> iallocator plugin", default=None, type="string"),
   ikv_option("-H", "--hypervisor", dest="hypervisor",
diff --git a/tools/burnin b/tools/burnin
index c8d76f9783f693a9f9321e53322213baa171e90b..0c2e94d7b1a58e1d8eb43762c7ee8f8a34537537 100755
--- a/tools/burnin
+++ b/tools/burnin
@@ -249,7 +249,7 @@ class Burner(object):
     parser.add_option("-n", "--nodes", dest="nodes", default="",
                       help="Comma separated list of nodes to perform"
                       " the burnin on (defaults to all nodes)")
-    parser.add_option("--iallocator", dest="iallocator",
+    parser.add_option("-I", "--iallocator", dest="iallocator",
                       default=None, type="string",
                       help="Perform the allocation using an iallocator"
                       " instead of fixed node spread (node restrictions no"