diff --git a/daemons/ganeti-noded b/daemons/ganeti-noded
index 6c5831182889c4ce7e5bc3cfed465de319fc8b7d..d4aafd3ae06663d7e3510e7f64e636c9fb385824 100755
--- a/daemons/ganeti-noded
+++ b/daemons/ganeti-noded
@@ -387,14 +387,14 @@ class NodeDaemonRequestHandler(http.HTTPRequestHandler):
     """Query information about all instances.
 
     """
-    return backend.GetAllInstancesInfo()
+    return backend.GetAllInstancesInfo(params[0])
 
   @staticmethod
   def perspective_instance_list(params):
     """Query the list of running instances.
 
     """
-    return backend.GetInstanceList()
+    return backend.GetInstanceList(params[0])
 
   # node --------------------------
 
@@ -411,8 +411,8 @@ class NodeDaemonRequestHandler(http.HTTPRequestHandler):
     """Query node information.
 
     """
-    vgname = params[0]
-    return backend.GetNodeInfo(vgname)
+    vgname, hypervisor_type = params
+    return backend.GetNodeInfo(vgname, hypervisor_type)
 
   @staticmethod
   def perspective_node_add(params):
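
For context, the handlers above take whatever lib/rpc.py (changed further down)
puts into the params list. A rough sketch of the payloads the node daemon now
unpacks; the volume group name is a placeholder and the hypervisor names stand
for the values of the constants.HT_* constants:

  from ganeti import backend, constants

  # instance_list / all_instances_info: one parameter, a list of hypervisor names
  params = [[constants.HT_XEN_PVM30, constants.HT_XEN_HVM31]]
  backend.GetInstanceList(params[0])

  # node_info: the volume group name plus a single hypervisor name
  params = ["xenvg", constants.HT_XEN_PVM30]
  vgname, hypervisor_type = params
  backend.GetNodeInfo(vgname, hypervisor_type)
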
diff --git a/lib/backend.py b/lib/backend.py
index 5e11fcb0adc8b24217908e50ee5fea0178f27dce..0df7a005ed02d0438926c56d9ce48772a1656a26 100644
--- a/lib/backend.py
+++ b/lib/backend.py
@@ -231,18 +231,21 @@ def LeaveCluster():
   raise errors.QuitGanetiException(False, 'Shutdown scheduled')
 
 
-def GetNodeInfo(vgname):
+def GetNodeInfo(vgname, hypervisor_type):
   """Gives back a hash with different informations about the node.
 
-  Returns:
-    { 'vg_size' : xxx,  'vg_free' : xxx, 'memory_domain0': xxx,
-      'memory_free' : xxx, 'memory_total' : xxx }
-    where
-    vg_size is the size of the configured volume group in MiB
-    vg_free is the free size of the volume group in MiB
-    memory_dom0 is the memory allocated for domain0 in MiB
-    memory_free is the currently available (free) ram in MiB
-    memory_total is the total number of ram in MiB
+  @type vgname: C{string}
+  @param vgname: the name of the volume group to ask for disk space information
+  @type hypervisor_type: C{str}
+  @param hypervisor_type: the name of the hypervisor to ask for
+      memory information
+  @rtype: C{dict}
+  @return: dictionary with the following keys:
+      - vg_size is the size of the configured volume group in MiB
+      - vg_free is the free size of the volume group in MiB
+      - memory_dom0 is the memory allocated for domain0 in MiB
+      - memory_free is the currently available (free) ram in MiB
+      - memory_total is the total amount of ram in MiB
 
   """
   outputarray = {}
@@ -250,7 +253,7 @@ def GetNodeInfo(vgname):
   outputarray['vg_size'] = vginfo['vg_size']
   outputarray['vg_free'] = vginfo['vg_free']
 
-  hyper = hypervisor.GetHypervisor(_GetConfig())
+  hyper = hypervisor.GetHypervisor(hypervisor_type)
   hyp_info = hyper.GetNodeInfo()
   if hyp_info is not None:
     outputarray.update(hyp_info)
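
As a concrete illustration of the dictionary documented above (values invented,
all sizes in MiB):

  from ganeti import backend, constants

  info = backend.GetNodeInfo("xenvg", constants.HT_XEN_PVM30)
  # info == {'vg_size': 409600, 'vg_free': 102400,
  #          'memory_total': 4096, 'memory_free': 2048, 'memory_dom0': 512}
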
@@ -267,25 +270,36 @@ def GetNodeInfo(vgname):
 def VerifyNode(what, cluster_name):
   """Verify the status of the local node.
 
-  Args:
-    what - a dictionary of things to check:
-      'filelist' : list of files for which to compute checksums
-      'nodelist' : list of nodes we should check communication with
-      'hypervisor': run the hypervisor-specific verify
+  Based on the input L{what} parameter, various checks are done on the
+  local node.
+
+  If the I{filelist} key is present, this list of
+  files is checksummed and the file/checksum pairs are returned.
+
+  If the I{nodelist} key is present, we check that we have
+  connectivity via ssh with the target nodes (and check the hostname
+  report).
 
-  Requested files on local node are checksummed and the result returned.
+  If the I{node-net-test} key is present, we check that we have
+  connectivity to the given nodes via both primary IP and, if
+  applicable, secondary IPs.
+
+  @type what: C{dict}
+  @param what: a dictionary of things to check:
+      - filelist: list of files for which to compute checksums
+      - nodelist: list of nodes we should check ssh communication with
+      - node-net-test: list of nodes we should check node daemon port
+        connectivity with
+      - hypervisor: list with hypervisors to run the verify for
 
-  The nodelist is traversed, with the following checks being made
-  for each node:
-  - known_hosts key correct
-  - correct resolving of node name (target node returns its own hostname
-    by ssh-execution of 'hostname', result compared against name in list.
 
   """
   result = {}
 
   if 'hypervisor' in what:
-    result['hypervisor'] = hypervisor.GetHypervisor(_GetConfig()).Verify()
+    result['hypervisor'] = my_dict = {}
+    for hv_name in what['hypervisor']:
+      my_dict[hv_name] = hypervisor.GetHypervisor(hv_name).Verify()
 
   if 'filelist' in what:
     result['filelist'] = utils.FingerprintFiles(what['filelist'])
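
A minimal sketch of the dictionary VerifyNode now accepts, and of the new
per-hypervisor result; the names and addresses are placeholders, the real
dictionary is built by LUVerifyCluster in cmdlib.py (changed below):

  from ganeti import backend, constants

  what = {
    'filelist': ['/etc/hosts'],
    'nodelist': ['node1.example.com', 'node2.example.com'],
    'node-net-test': [('node2.example.com', '192.0.2.2', '198.51.100.2')],
    'hypervisor': [constants.HT_XEN_PVM30, constants.HT_XEN_HVM31],
  }
  result = backend.VerifyNode(what, 'cluster.example.com')
  # result['hypervisor'] maps each hypervisor name to its Verify() output
  # (None when the hypervisor-specific checks pass)
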
@@ -415,41 +429,49 @@ def BridgesExist(bridges_list):
   return True
 
 
-def GetInstanceList():
+def GetInstanceList(hypervisor_list):
   """Provides a list of instances.
 
-  Returns:
-    A list of all running instances on the current node
-    - instance1.example.com
-    - instance2.example.com
+  @type hypervisor_list: list
+  @param hypervisor_list: the list of hypervisors to query for instances
+
+  @rtype: list
+  @return: a list of all running instances on the current node
+             - instance1.example.com
+             - instance2.example.com
 
   """
-  try:
-    names = hypervisor.GetHypervisor(_GetConfig()).ListInstances()
-  except errors.HypervisorError, err:
-    logging.exception("Error enumerating instances")
-    raise
+  results = []
+  for hname in hypervisor_list:
+    try:
+      names = hypervisor.GetHypervisor(hname).ListInstances()
+      results.extend(names)
+    except errors.HypervisorError, err:
+      logging.exception("Error enumerating instances for hypervisor %s", hname)
+      # FIXME: should we somehow not propagate this to the master?
+      raise
 
-  return names
+  return results
 
 
-def GetInstanceInfo(instance):
+def GetInstanceInfo(instance, hname):
   """Gives back the informations about an instance as a dictionary.
 
-  Args:
-    instance: name of the instance (ex. instance1.example.com)
+  @type instance: string
+  @param instance: the instance name
+  @type hname: string
+  @param hname: the hypervisor type of the instance
 
-  Returns:
-    { 'memory' : 511, 'state' : '-b---', 'time' : 3188.8, }
-    where
-    memory: memory size of instance (int)
-    state: xen state of instance (string)
-    time: cpu time of instance (float)
+  @rtype: dict
+  @return: dictionary with the following keys:
+      - memory: memory size of instance (int)
+      - state: xen state of instance (string)
+      - time: cpu time of instance (float)
 
   """
   output = {}
 
-  iinfo = hypervisor.GetHypervisor(_GetConfig()).GetInstanceInfo(instance)
+  iinfo = hypervisor.GetHypervisor(hname).GetInstanceInfo(instance)
   if iinfo is not None:
     output['memory'] = iinfo[2]
     output['state'] = iinfo[4]
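
A usage sketch for the two functions above; the constants are the same ones
used elsewhere in this patch and the instance names are invented:

  from ganeti import backend, constants

  running = backend.GetInstanceList([constants.HT_XEN_PVM30,
                                     constants.HT_XEN_HVM31])
  # e.g. ['instance1.example.com', 'instance2.example.com'], merged over
  # both hypervisors

  info = backend.GetInstanceInfo('instance1.example.com',
                                 constants.HT_XEN_PVM30)
  # e.g. {'memory': 512, 'state': '-b----', 'time': 3188.8}
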
@@ -458,34 +480,38 @@ def GetInstanceInfo(instance):
   return output
 
 
-def GetAllInstancesInfo():
+def GetAllInstancesInfo(hypervisor_list):
   """Gather data about all instances.
 
   This is the equivalent of `GetInstanceInfo()`, except that it
   computes data for all instances at once, thus being faster if one
   needs data about more than one instance.
 
-  Returns: a dictionary of dictionaries, keys being the instance name,
-    and with values:
-    { 'memory' : 511, 'state' : '-b---', 'time' : 3188.8, }
-    where
-    memory: memory size of instance (int)
-    state: xen state of instance (string)
-    time: cpu time of instance (float)
-    vcpus: the number of cpus
+  @type hypervisor_list: list
+  @param hypervisor_list: list of hypervisors to query for instance data
+
+  @rtype: dict of dicts
+  @return: dictionary of instance: data, with data having the following keys:
+      - memory: memory size of instance (int)
+      - state: xen state of instance (string)
+      - time: cpu time of instance (float)
+      - vcpus: the number of virtual cpus
 
   """
   output = {}
 
-  iinfo = hypervisor.GetHypervisor(_GetConfig()).GetAllInstancesInfo()
-  if iinfo:
-    for name, inst_id, memory, vcpus, state, times in iinfo:
-      output[name] = {
-        'memory': memory,
-        'vcpus': vcpus,
-        'state': state,
-        'time': times,
-        }
+  for hname in hypervisor_list:
+    iinfo = hypervisor.GetHypervisor(hname).GetAllInstancesInfo()
+    if iinfo:
+      for name, inst_id, memory, vcpus, state, times in iinfo:
+        if name in output:
+          raise errors.HypervisorError("Instance %s running duplicate" % name)
+        output[name] = {
+          'memory': memory,
+          'vcpus': vcpus,
+          'state': state,
+          'time': times,
+          }
 
   return output
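
The merged result keeps one entry per instance name, and an instance reported
as running by more than one hypervisor raises HypervisorError; a quick sketch
(values invented):

  from ganeti import backend, constants

  data = backend.GetAllInstancesInfo([constants.HT_XEN_PVM30,
                                      constants.HT_XEN_HVM31])
  # e.g. {'instance1.example.com':
  #          {'memory': 512, 'vcpus': 1, 'state': '-b----', 'time': 3188.8}}
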
 
@@ -499,7 +525,6 @@ def AddOSToInstance(instance, os_disk, swap_disk):
     swap_disk: the instance-visible name of the swap device
 
   """
-  cfg = _GetConfig()
   inst_os = OSFromDisk(instance.os)
 
   create_script = inst_os.create_script
@@ -535,7 +560,7 @@ def AddOSToInstance(instance, os_disk, swap_disk):
                                 inst_os.path, create_script, instance.name,
                                 real_os_dev.dev_path, real_swap_dev.dev_path,
                                 logfile)
-  env = {'HYPERVISOR': cfg.GetHypervisorType()}
+  env = {'HYPERVISOR': instance.hypervisor}
 
   result = utils.RunCmd(command, env=env)
   if result.failed:
@@ -666,17 +691,19 @@ def _GatherBlockDevs(instance):
 def StartInstance(instance, extra_args):
   """Start an instance.
 
-  Args:
-    instance - name of instance to start.
+  @type instance: C{objects.Instance}
+  @param instance: the instance object to be started
+  @rtype: boolean
+  @return: whether the startup was successful or not
 
   """
-  running_instances = GetInstanceList()
+  running_instances = GetInstanceList([instance.hypervisor])
 
   if instance.name in running_instances:
     return True
 
   block_devices = _GatherBlockDevs(instance)
-  hyper = hypervisor.GetHypervisor(_GetConfig())
+  hyper = hypervisor.GetHypervisor(instance.hypervisor)
 
   try:
     hyper.StartInstance(instance, block_devices, extra_args)
@@ -690,16 +717,19 @@ def StartInstance(instance, extra_args):
 def ShutdownInstance(instance):
   """Shut an instance down.
 
-  Args:
-    instance - name of instance to shutdown.
+  @type instance: C{objects.Instance}
+  @param instance: the instance object to be shut down
+  @rtype: boolean
+  @return: whether the shutdown was successful or not
 
   """
-  running_instances = GetInstanceList()
+  hv_name = instance.hypervisor
+  running_instances = GetInstanceList([hv_name])
 
   if instance.name not in running_instances:
     return True
 
-  hyper = hypervisor.GetHypervisor(_GetConfig())
+  hyper = hypervisor.GetHypervisor(hv_name)
   try:
     hyper.StopInstance(instance)
   except errors.HypervisorError, err:
@@ -711,7 +741,7 @@ def ShutdownInstance(instance):
 
   time.sleep(1)
   for dummy in range(11):
-    if instance.name not in GetInstanceList():
+    if instance.name not in GetInstanceList([hv_name]):
       break
     time.sleep(10)
   else:
@@ -725,7 +755,7 @@ def ShutdownInstance(instance):
       return False
 
     time.sleep(1)
-    if instance.name in GetInstanceList():
+    if instance.name in GetInstanceList([hv_name]):
       logging.error("could not shutdown instance '%s' even by destroy",
                     instance.name)
       return False
@@ -741,13 +771,13 @@ def RebootInstance(instance, reboot_type, extra_args):
     reboot_type - how to reboot [soft,hard,full]
 
   """
-  running_instances = GetInstanceList()
+  running_instances = GetInstanceList([instance.hypervisor])
 
   if instance.name not in running_instances:
     logging.error("Cannot reboot instance that is not running")
     return False
 
-  hyper = hypervisor.GetHypervisor(_GetConfig())
+  hyper = hypervisor.GetHypervisor(instance.hypervisor)
   if reboot_type == constants.INSTANCE_REBOOT_SOFT:
     try:
       hyper.RebootInstance(instance)
@@ -764,7 +794,6 @@ def RebootInstance(instance, reboot_type, extra_args):
   else:
     raise errors.ParameterError("reboot_type invalid")
 
-
   return True
 
 
@@ -784,7 +813,7 @@ def MigrateInstance(instance, target, live):
       - msg is a string with details in case of failure
 
   """
-  hyper = hypervisor.GetHypervisor(_GetConfig())
+  hyper = hypervisor.GetHypervisor(instance.hypervisor)
 
   try:
     hyper.MigrateInstance(instance.name, target, live)
@@ -1464,7 +1493,6 @@ def ImportOSIntoInstance(instance, os_disk, swap_disk, src_node, src_image,
     False in case of error, True otherwise.
 
   """
-  cfg = _GetConfig()
   inst_os = OSFromDisk(instance.os)
   import_script = inst_os.import_script
 
@@ -1507,7 +1535,7 @@ def ImportOSIntoInstance(instance, os_disk, swap_disk, src_node, src_image,
                                logfile)
 
   command = '|'.join([utils.ShellQuoteArgs(remotecmd), comprcmd, impcmd])
-  env = {'HYPERVISOR': cfg.GetHypervisorType()}
+  env = {'HYPERVISOR': instance.hypervisor}
 
   result = utils.RunCmd(command, env=env)
 
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 9dd65554a0d89dd2515a1cf1166a37b95f9d76bc..8c038458836a57f94daa9d48c5f66ef7cd717054 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -586,8 +586,11 @@ class LUVerifyCluster(LogicalUnit):
                           (node, node_result['node-net-test'][node]))
 
     hyp_result = node_result.get('hypervisor', None)
-    if hyp_result is not None:
-      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
+    if isinstance(hyp_result, dict):
+      for hv_name, hv_result in hyp_result.iteritems():
+        if hv_result is not None:
+          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
+                      (hv_name, hv_result))
     return bad
 
   def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
@@ -721,6 +724,7 @@ class LUVerifyCluster(LogicalUnit):
       feedback_fn("  - ERROR: %s" % msg)
 
     vg_name = self.cfg.GetVGName()
+    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
     nodelist = utils.NiceSort(self.cfg.GetNodeList())
     nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
     instancelist = utils.NiceSort(self.cfg.GetInstanceList())
@@ -739,19 +743,20 @@ class LUVerifyCluster(LogicalUnit):
 
     feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
     all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
-    all_instanceinfo = rpc.call_instance_list(nodelist)
+    all_instanceinfo = rpc.call_instance_list(nodelist, hypervisors)
     all_vglist = rpc.call_vg_list(nodelist)
     node_verify_param = {
       'filelist': file_names,
       'nodelist': nodelist,
-      'hypervisor': None,
+      'hypervisor': hypervisors,
       'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
                         for node in nodeinfo]
       }
     all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param,
                                       self.cfg.GetClusterName())
     all_rversion = rpc.call_version(nodelist)
-    all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())
+    all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName(),
+                                   self.cfg.GetHypervisorType())
 
     for node in nodelist:
       feedback_fn("* Verifying node %s" % node)
@@ -1470,7 +1475,8 @@ class LUQueryNodes(NoHooksLU):
 
     if self.dynamic_fields.intersection(self.op.output_fields):
       live_data = {}
-      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
+      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName(),
+                                     self.cfg.GetHypervisorType())
       for name in nodenames:
         nodeinfo = node_data.get(name, None)
         if nodeinfo:
@@ -1808,7 +1814,7 @@ class LUAddNode(LogicalUnit):
                        (fname, to_node))
 
     to_copy = []
-    if self.cfg.GetHypervisorType() == constants.HT_XEN_HVM31:
+    if constants.HT_XEN_HVM31 in self.cfg.GetClusterInfo().enabled_hypervisors:
       to_copy.append(constants.VNC_PASSWORD_FILE)
     for fname in to_copy:
       result = rpc.call_upload_file([node], fname)
@@ -1852,6 +1858,7 @@ class LUQueryClusterInfo(NoHooksLU):
       "master": self.cfg.GetMasterNode(),
       "architecture": (platform.architecture()[0], platform.machine()),
       "hypervisor_type": self.cfg.GetHypervisorType(),
+      "enabled_hypervisors": self.cfg.GetClusterInfo().enabled_hypervisors,
       }
 
     return result
@@ -2047,7 +2054,8 @@ def _SafeShutdownInstanceDisks(instance, cfg):
   _ShutdownInstanceDisks.
 
   """
-  ins_l = rpc.call_instance_list([instance.primary_node])
+  ins_l = rpc.call_instance_list([instance.primary_node],
+                                 [instance.hypervisor])
   ins_l = ins_l[instance.primary_node]
   if not type(ins_l) is list:
     raise errors.OpExecError("Can't contact node '%s'" %
@@ -2081,7 +2089,7 @@ def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
   return result
 
 
-def _CheckNodeFreeMemory(cfg, node, reason, requested):
+def _CheckNodeFreeMemory(cfg, node, reason, requested, hypervisor):
   """Checks if a node has enough free memory.
 
   This function check if a given node has the needed amount of free
@@ -2089,14 +2097,21 @@ def _CheckNodeFreeMemory(cfg, node, reason, requested):
   information from the node, this function raise an OpPrereqError
   exception.
 
-  Args:
-    - cfg: a ConfigWriter instance
-    - node: the node name
-    - reason: string to use in the error message
-    - requested: the amount of memory in MiB
+  @type cfg: C{config.ConfigWriter}
+  @param cfg: the ConfigWriter instance from which we get configuration data
+  @type node: C{str}
+  @param node: the node to check
+  @type reason: C{str}
+  @param reason: string to use in the error message
+  @type requested: C{int}
+  @param requested: the amount of memory in MiB to check for
+  @type hypervisor: C{str}
+  @param hypervisor: the hypervisor to ask for memory stats
+  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
+      we cannot check the node
 
   """
-  nodeinfo = rpc.call_node_info([node], cfg.GetVGName())
+  nodeinfo = rpc.call_node_info([node], cfg.GetVGName(), hypervisor)
   if not nodeinfo or not isinstance(nodeinfo, dict):
     raise errors.OpPrereqError("Could not contact node %s for resource"
                              " information" % (node,))
@@ -2158,7 +2173,7 @@ class LUStartupInstance(LogicalUnit):
 
     _CheckNodeFreeMemory(self.cfg, instance.primary_node,
                          "starting instance %s" % instance.name,
-                         instance.memory)
+                         instance.memory, instance.hypervisor)
 
   def Exec(self, feedback_fn):
     """Start the instance.
@@ -2357,7 +2372,8 @@ class LUReinstallInstance(LogicalUnit):
     if instance.status != "down":
       raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                  self.op.instance_name)
-    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
+    remote_info = rpc.call_instance_info(instance.primary_node, instance.name,
+                                         instance.hypervisor)
     if remote_info:
       raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                  (self.op.instance_name,
@@ -2434,7 +2450,8 @@ class LURenameInstance(LogicalUnit):
     if instance.status != "down":
       raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                  self.op.instance_name)
-    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
+    remote_info = rpc.call_instance_info(instance.primary_node, instance.name,
+                                         instance.hypervisor)
     if remote_info:
       raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                  (self.op.instance_name,
@@ -2590,7 +2607,7 @@ class LUQueryInstances(NoHooksLU):
       "hvm_boot_order", "hvm_acpi", "hvm_pae",
       "hvm_cdrom_image_path", "hvm_nic_type",
       "hvm_disk_type", "vnc_bind_address",
-      "serial_no",
+      "serial_no", "hypervisor",
       ])
     _CheckOutputFields(static=self.static_fields,
                        dynamic=self.dynamic_fields,
@@ -2642,11 +2659,12 @@ class LUQueryInstances(NoHooksLU):
     # begin data gathering
 
     nodes = frozenset([inst.primary_node for inst in instance_list])
+    hv_list = list(set([inst.hypervisor for inst in instance_list]))
 
     bad_nodes = []
     if self.dynamic_fields.intersection(self.op.output_fields):
       live_data = {}
-      node_data = rpc.call_all_instances_info(nodes)
+      node_data = rpc.call_all_instances_info(nodes, hv_list)
       for name in nodes:
         result = node_data[name]
         if result:
@@ -2734,6 +2752,8 @@ class LUQueryInstances(NoHooksLU):
             val = "default"
           else:
             val = "-"
+        elif field == "hypervisor":
+          val = instance.hypervisor
         else:
           raise errors.ParameterError(field)
         iout.append(val)
@@ -2795,7 +2815,8 @@ class LUFailoverInstance(LogicalUnit):
     target_node = secondary_nodes[0]
     # check memory requirements on the secondary node
     _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" %
-                         instance.name, instance.memory)
+                         instance.name, instance.memory,
+                         instance.hypervisor)
 
     # check bridge existance
     brlist = [nic.bridge for nic in instance.nics]
@@ -3150,7 +3171,7 @@ class LUCreateInstance(LogicalUnit):
     for attr in ["kernel_path", "initrd_path", "pnode", "snode",
                  "iallocator", "hvm_boot_order", "hvm_acpi", "hvm_pae",
                  "hvm_cdrom_image_path", "hvm_nic_type", "hvm_disk_type",
-                 "vnc_bind_address"]:
+                 "vnc_bind_address", "hypervisor"]:
       if not hasattr(self.op, attr):
         setattr(self.op, attr, None)
 
@@ -3327,6 +3348,19 @@ class LUCreateInstance(LogicalUnit):
       raise errors.OpPrereqError("Cluster does not support lvm-based"
                                  " instances")
 
+    # cheap checks (from the config only)
+
+    if self.op.hypervisor is None:
+      self.op.hypervisor = self.cfg.GetHypervisorType()
+
+    enabled_hvs = self.cfg.GetClusterInfo().enabled_hypervisors
+    if self.op.hypervisor not in enabled_hvs:
+      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
+                                 " cluster (%s)" % (self.op.hypervisor,
+                                  ",".join(enabled_hvs)))
+
+    # costly checks (from nodes)
+
     if self.op.mode == constants.INSTANCE_IMPORT:
       src_node = self.op.src_node
       src_path = self.op.src_path
@@ -3401,7 +3435,8 @@ class LUCreateInstance(LogicalUnit):
     # Check lv size requirements
     if req_size is not None:
       nodenames = [pnode.name] + self.secondaries
-      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
+      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName(),
+                                    self.op.hypervisor)
       for node in nodenames:
         info = nodeinfo.get(node, None)
         if not info:
@@ -3435,7 +3470,7 @@ class LUCreateInstance(LogicalUnit):
     if self.op.start:
       _CheckNodeFreeMemory(self.cfg, self.pnode.name,
                            "creating instance %s" % self.op.instance_name,
-                           self.op.mem_size)
+                           self.op.mem_size, self.op.hypervisor)
 
     # hvm_cdrom_image_path verification
     if self.op.hvm_cdrom_image_path is not None:
@@ -3458,7 +3493,7 @@ class LUCreateInstance(LogicalUnit):
                                    self.op.vnc_bind_address)
 
     # Xen HVM device type checks
-    if self.cfg.GetHypervisorType() == constants.HT_XEN_HVM31:
+    if self.op.hypervisor == constants.HT_XEN_HVM31:
       if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
         raise errors.OpPrereqError("Invalid NIC type %s specified for Xen HVM"
                                    " hypervisor" % self.op.hvm_nic_type)
@@ -3487,7 +3522,7 @@ class LUCreateInstance(LogicalUnit):
     if self.inst_ip is not None:
       nic.ip = self.inst_ip
 
-    ht_kind = self.cfg.GetHypervisorType()
+    ht_kind = self.op.hypervisor
     if ht_kind in constants.HTS_REQ_PORT:
       network_port = self.cfg.AllocatePort()
     else:
@@ -3533,6 +3568,7 @@ class LUCreateInstance(LogicalUnit):
                             vnc_bind_address=self.op.vnc_bind_address,
                             hvm_nic_type=self.op.hvm_nic_type,
                             hvm_disk_type=self.op.hvm_disk_type,
+                            hypervisor=self.op.hypervisor,
                             )
 
     feedback_fn("* creating instance disks...")
@@ -3632,7 +3668,8 @@ class LUConnectConsole(NoHooksLU):
     instance = self.instance
     node = instance.primary_node
 
-    node_insts = rpc.call_instance_list([node])[node]
+    node_insts = rpc.call_instance_list([node],
+                                        [instance.hypervisor])[node]
     if node_insts is False:
       raise errors.OpExecError("Can't connect to node %s." % node)
 
@@ -3641,7 +3678,7 @@ class LUConnectConsole(NoHooksLU):
 
     logger.Debug("connecting to console of %s on %s" % (instance.name, node))
 
-    hyper = hypervisor.GetHypervisor(self.cfg)
+    hyper = hypervisor.GetHypervisor(instance.hypervisor)
     console_cmd = hyper.GetShellCommandForConsole(instance)
 
     # build ssh cmdline
@@ -4243,7 +4280,8 @@ class LUGrowDisk(LogicalUnit):
                                  (self.op.disk, instance.name))
 
     nodenames = [instance.primary_node] + list(instance.secondary_nodes)
-    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
+    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName(),
+                                  instance.hypervisor)
     for node in nodenames:
       info = nodeinfo.get(node, None)
       if not info:
@@ -4366,7 +4404,8 @@ class LUQueryInstanceData(NoHooksLU):
     result = {}
     for instance in self.wanted_instances:
       remote_info = rpc.call_instance_info(instance.primary_node,
-                                                instance.name)
+                                           instance.name,
+                                           instance.hypervisor)
       if remote_info and "state" in remote_info:
         remote_state = "up"
       else:
@@ -4390,9 +4429,10 @@ class LUQueryInstanceData(NoHooksLU):
         "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
         "disks": disks,
         "vcpus": instance.vcpus,
+        "hypervisor": instance.hypervisor,
         }
 
-      htkind = self.cfg.GetHypervisorType()
+      htkind = instance.hypervisor
       if htkind == constants.HT_XEN_PVM30:
         idict["kernel_path"] = instance.kernel_path
         idict["initrd_path"] = instance.initrd_path
@@ -4589,8 +4629,10 @@ class LUSetInstanceParams(LogicalUnit):
       pnode = self.instance.primary_node
       nodelist = [pnode]
       nodelist.extend(instance.secondary_nodes)
-      instance_info = rpc.call_instance_info(pnode, instance.name)
-      nodeinfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())
+      instance_info = rpc.call_instance_info(pnode, instance.name,
+                                             instance.hypervisor)
+      nodeinfo = rpc.call_node_info(nodelist, self.cfg.GetVGName(),
+                                    instance.hypervisor)
 
       if pnode not in nodeinfo or not isinstance(nodeinfo[pnode], dict):
         # Assume the primary node is unreachable and go ahead
@@ -4617,7 +4659,7 @@ class LUSetInstanceParams(LogicalUnit):
                            " node %s" % node)
 
     # Xen HVM device type checks
-    if self.cfg.GetHypervisorType() == constants.HT_XEN_HVM31:
+    if instance.hypervisor == constants.HT_XEN_HVM31:
       if self.op.hvm_nic_type is not None:
         if self.op.hvm_nic_type not in constants.HT_HVM_VALID_NIC_TYPES:
           raise errors.OpPrereqError("Invalid NIC type %s specified for Xen"
@@ -5180,12 +5222,13 @@ class IAllocator(object):
 
     """
     cfg = self.cfg
+    cluster_info = cfg.GetClusterInfo()
     # cluster data
     data = {
       "version": 1,
       "cluster_name": self.cfg.GetClusterName(),
-      "cluster_tags": list(cfg.GetClusterInfo().GetTags()),
-      "hypervisor_type": self.cfg.GetHypervisorType(),
+      "cluster_tags": list(cluster_info.GetTags()),
+      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
       # we don't have job IDs
       }
 
@@ -5194,7 +5237,10 @@ class IAllocator(object):
     # node data
     node_results = {}
     node_list = cfg.GetNodeList()
-    node_data = rpc.call_node_info(node_list, cfg.GetVGName())
+    # FIXME: here we query memory data for only one hypervisor, but
+    # instances can belong to different hypervisors
+    node_data = rpc.call_node_info(node_list, cfg.GetVGName(),
+                                   cfg.GetHypervisorType())
     for nname in node_list:
       ninfo = cfg.GetNodeInfo(nname)
       if nname not in node_data or not isinstance(node_data[nname], dict):
@@ -5250,6 +5296,7 @@ class IAllocator(object):
         "nics": nic_data,
         "disks": [{"size": dsk.size, "mode": "w"} for dsk in iinfo.disks],
         "disk_template": iinfo.disk_template,
+        "hypervisor": iinfo.hypervisor,
         }
       instance_data[iinfo.name] = pir
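
For allocator scripts, the practical effect of the changes above is that the
request document now carries the enabled hypervisors at cluster level and a
hypervisor field per instance; a trimmed, illustrative fragment showing only
the keys touched by this patch (values invented):

  from ganeti import constants

  data = {
    "version": 1,
    "cluster_name": "cluster.example.com",
    "cluster_tags": [],
    "enabled_hypervisors": [constants.HT_XEN_PVM30],
    # ... node and instance data as before, each instance dict additionally
    # carrying "hypervisor": <the instance's hypervisor name>
  }
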
 
diff --git a/lib/hypervisor/__init__.py b/lib/hypervisor/__init__.py
index 4e0dcde30d02f852eb0a56b09bd359db54765ce8..d8721959784ea4ddbd802f9ac8e0dde8c6ae0f9d 100644
--- a/lib/hypervisor/__init__.py
+++ b/lib/hypervisor/__init__.py
@@ -39,17 +39,16 @@ _HYPERVISOR_MAP = {
     }
 
 
-def GetHypervisor(cfg):
+def GetHypervisor(ht_kind):
   """Return a Hypervisor instance.
 
   This function parses the cluster hypervisor configuration file and
   instantiates a class based on the value of this file.
 
-  @param cfg: Configuration object
+  @type ht_kind: string
+  @param ht_kind: The requested hypervisor type
 
   """
-  ht_kind = cfg.GetHypervisorType()
-
   if ht_kind not in _HYPERVISOR_MAP:
     raise errors.HypervisorError("Unknown hypervisor type '%s'" % ht_kind)
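
With the new signature, callers pass the hypervisor name directly instead of a
configuration object; a short sketch (the constant stands for whichever
hypervisor is wanted):

  from ganeti import constants
  from ganeti import hypervisor

  hyper = hypervisor.GetHypervisor(constants.HT_XEN_PVM30)
  result = hyper.Verify()   # None means the hypervisor-specific checks passed
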
 
diff --git a/lib/objects.py b/lib/objects.py
index f1bc5c93e503e5afe1db3c264837727927f76288..2af22d069e7f764a17e6af66a0bd3c8667b19add 100644
--- a/lib/objects.py
+++ b/lib/objects.py
@@ -503,6 +503,7 @@ class Instance(TaggableObject):
     "name",
     "primary_node",
     "os",
+    "hypervisor",
     "status",
     "memory",
     "vcpus",
@@ -701,6 +702,7 @@ class Cluster(TaggableObject):
     "master_netdev",
     "cluster_name",
     "file_storage_dir",
+    "enabled_hypervisors",
     ]
 
   def ToDict(self):
diff --git a/lib/opcodes.py b/lib/opcodes.py
index b0ee9f9afd85689f4b0c383f3797dee817b18478..550e408992deb9f81f245a259077eed0167db7cd 100644
--- a/lib/opcodes.py
+++ b/lib/opcodes.py
@@ -327,6 +327,7 @@ class OpCreateInstance(OpCode):
     "hvm_pae", "hvm_cdrom_image_path", "vnc_bind_address",
     "file_storage_dir", "file_driver",
     "iallocator", "hvm_nic_type", "hvm_disk_type",
+    "hypervisor",
     ]
 
 
diff --git a/lib/rpc.py b/lib/rpc.py
index 2a8e98bd36af2867c83868801677e34dfbab7788..02f770856db86b76e21d63724230727d01a68127 100644
--- a/lib/rpc.py
+++ b/lib/rpc.py
@@ -211,7 +211,7 @@ def call_instance_migrate(node, instance, target, live):
 
   @type node: string
   @param node: the node on which the instance is currently running
-  @type instance: instance object
+  @type instance: C{objects.Instance}
   @param instance: the instance definition
   @type target: string
   @param target: the target node name
@@ -264,11 +264,18 @@ def call_instance_run_rename(node, inst, old_name, osdev, swapdev):
   return c.getresult().get(node, False)
 
 
-def call_instance_info(node, instance):
+def call_instance_info(node, instance, hname):
   """Returns information about a single instance.
 
   This is a single-node call.
 
+  @type node: string
+  @param node: the node to query
+  @type instance: string
+  @param instance: the instance name
+  @type hname: string
+  @param hname: the hypervisor type of the instance
+
   """
   c = Client("instance_info", [instance])
   c.connect(node)
@@ -276,25 +283,35 @@ def call_instance_info(node, instance):
   return c.getresult().get(node, False)
 
 
-def call_all_instances_info(node_list):
-  """Returns information about all instances on a given node.
+def call_all_instances_info(node_list, hypervisor_list):
+  """Returns information about all instances on the given nodes.
 
-  This is a single-node call.
+  This is a multi-node call.
+
+  @type node_list: list
+  @param node_list: the list of nodes to query
+  @type hypervisor_list: list
+  @param hypervisor_list: the hypervisors to query for instances
 
   """
-  c = Client("all_instances_info", [])
+  c = Client("all_instances_info", [hypervisor_list])
   c.connect_list(node_list)
   c.run()
   return c.getresult()
 
 
-def call_instance_list(node_list):
+def call_instance_list(node_list, hypervisor_list):
   """Returns the list of running instances on a given node.
 
-  This is a single-node call.
+  This is a multi-node call.
+
+  @type node_list: list
+  @param node_list: the list of nodes to query
+  @type hypervisor_list: list
+  @param hypervisor_list: the hypervisors to query for instances
 
   """
-  c = Client("instance_list", [])
+  c = Client("instance_list", [hypervisor_list])
   c.connect_list(node_list)
   c.run()
   return c.getresult()
@@ -312,7 +329,7 @@ def call_node_tcp_ping(node, source, target, port, timeout, live_port_needed):
   return c.getresult().get(node, False)
 
 
-def call_node_info(node_list, vg_name):
+def call_node_info(node_list, vg_name, hypervisor_type):
   """Return node information.
 
   This will return memory information and volume group size and free
@@ -320,8 +337,16 @@ def call_node_info(node_list, vg_name):
 
   This is a multi-node call.
 
+  @type node_list: list
+  @param node_list: the list of nodes to query
+  @type vg_name: C{string}
+  @param vg_name: the volume group to ask for disk space information
+  @type hypervisor_type: C{str}
+  @param hypervisor_type: the name of the hypervisor to ask for
+      memory information
+
   """
-  c = Client("node_info", [vg_name])
+  c = Client("node_info", [vg_name, hypervisor_type])
   c.connect_list(node_list)
   c.run()
   retux = c.getresult()
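
Putting the rpc changes together, the call sites in cmdlib.py now look roughly
like this (node names, volume group and hypervisor are placeholders):

  from ganeti import constants, rpc

  # running instances for the given hypervisors, across several nodes
  ins = rpc.call_instance_list(['node1.example.com', 'node2.example.com'],
                               [constants.HT_XEN_PVM30])

  # memory and volume group data, with memory computed for one hypervisor
  ninfo = rpc.call_node_info(['node1.example.com', 'node2.example.com'],
                             'xenvg', constants.HT_XEN_PVM30)
  # both return a dictionary keyed by node name
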
diff --git a/scripts/gnt-instance b/scripts/gnt-instance
index 76e244236f79f94ad4b67cf4db08e0a7ce40a1d3..557ae0ce9eba20ea1f28096753968b282c0a0af8 100755
--- a/scripts/gnt-instance
+++ b/scripts/gnt-instance
@@ -43,7 +43,7 @@ _SHUTDOWN_INSTANCES = "instances"
 _VALUE_TRUE = "true"
 
 _LIST_DEF_FIELDS = [
-  "name", "os", "pnode", "status", "oper_ram",
+  "name", "hypervisor", "os", "pnode", "status", "oper_ram",
   ]
 
 
@@ -197,7 +197,7 @@ def ListInstances(opts, args):
       "hvm_nic_type": "HVM_NIC_type",
       "hvm_disk_type": "HVM_disk_type",
       "vnc_bind_address": "VNC_bind_address",
-      "serial_no": "SerialNo",
+      "serial_no": "SerialNo", "hypervisor": "Hypervisor",
       }
   else:
     headers = None
@@ -709,6 +709,7 @@ def ShowInstanceConfig(opts, args):
     buf.write("  Nodes:\n")
     buf.write("    - primary: %s\n" % instance["pnode"])
     buf.write("    - secondaries: %s\n" % ", ".join(instance["snodes"]))
+    buf.write("  Hypervisor: %s\n" % instance["hypervisor"])
     buf.write("  Operating system: %s\n" % instance["os"])
     if instance.has_key("network_port"):
       buf.write("  Allocated network port: %s\n" % instance["network_port"])
@@ -974,7 +975,8 @@ commands = {
            "Lists the instances and their status. The available fields are"
            " (see the man page for details): status, oper_state, oper_ram,"
            " name, os, pnode, snodes, admin_state, admin_ram, disk_template,"
-           " ip, mac, bridge, sda_size, sdb_size, vcpus, serial_no."
+           " ip, mac, bridge, sda_size, sdb_size, vcpus, serial_no,"
+           " hypervisor."
            " The default field"
            " list is (in order): %s." % ", ".join(_LIST_DEF_FIELDS),
            ),