diff --git a/doc/admin.rst b/doc/admin.rst
index d0ed24ba2ba9337eae18cccfbff7117fb8ba212b..cefa8e2e99971b76e8ad886dfc8ad726e9e6f9f9 100644
--- a/doc/admin.rst
+++ b/doc/admin.rst
@@ -320,11 +320,13 @@ them out of the Ganeti exports directory.
 Importing an instance is similar to creating a new one, but additionally
 one must specify the location of the snapshot. The command is::
 
-  gnt-backup import -n TARGET_NODE -t DISK_TEMPLATE \
+  gnt-backup import -n TARGET_NODE \
     --src-node=NODE --src-dir=DIR INSTANCE_NAME
 
-Most of the options available for the command :command:`gnt-instance
-add` are supported here too.
+By default, the instance parameters are read from the export information,
+but you can override them on the command line; most of the options
+available for the command :command:`gnt-instance add` are supported here
+too.
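+
+For example, to override the instance's memory size at import time while
+keeping all other parameters from the export, one could run::
+
+  gnt-backup import -n TARGET_NODE -B memory=512 \
+    --src-node=NODE --src-dir=DIR INSTANCE_NAME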
 
 Import of foreign instances
 +++++++++++++++++++++++++++
diff --git a/lib/backend.py b/lib/backend.py
index b7c046e08888772d46765c2578b74414054644d1..80d45d1b783dbede48280bc8c431b97de711ef7a 100644
--- a/lib/backend.py
+++ b/lib/backend.py
@@ -2091,6 +2091,7 @@ def FinalizeExport(instance, snap_disks):
   config.set(constants.INISECT_INS, 'vcpus', '%d' %
              instance.beparams[constants.BE_VCPUS])
   config.set(constants.INISECT_INS, 'disk_template', instance.disk_template)
+  config.set(constants.INISECT_INS, 'hypervisor', instance.hypervisor)
 
   nic_total = 0
   for nic_count, nic in enumerate(instance.nics):
@@ -2098,8 +2099,9 @@ def FinalizeExport(instance, snap_disks):
     config.set(constants.INISECT_INS, 'nic%d_mac' %
                nic_count, '%s' % nic.mac)
     config.set(constants.INISECT_INS, 'nic%d_ip' % nic_count, '%s' % nic.ip)
-    config.set(constants.INISECT_INS, 'nic%d_bridge' % nic_count,
-               '%s' % nic.bridge)
+    for param in constants.NICS_PARAMETER_TYPES:
+      config.set(constants.INISECT_INS, 'nic%d_%s' % (nic_count, param),
+                 '%s' % nic.nicparams.get(param, None))
   # TODO: redundant: on load can read nics until it doesn't exist
   config.set(constants.INISECT_INS, 'nic_count' , '%d' % nic_total)
 
@@ -2116,6 +2118,17 @@ def FinalizeExport(instance, snap_disks):
 
   config.set(constants.INISECT_INS, 'disk_count' , '%d' % disk_total)
 
+  # New-style hypervisor/backend parameters
+
+  config.add_section(constants.INISECT_HYP)
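+  # the globally-set hypervisor parameters (HVC_GLOBALS) can only be
+  # customized at the cluster level, so they are not saved per instance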
+  for name, value in instance.hvparams.items():
+    if name not in constants.HVC_GLOBALS:
+      config.set(constants.INISECT_HYP, name, str(value))
+
+  config.add_section(constants.INISECT_BEP)
+  for name, value in instance.beparams.items():
+    config.set(constants.INISECT_BEP, name, str(value))
+
   utils.WriteFile(utils.PathJoin(destdir, constants.EXPORT_CONF_FILE),
                   data=config.Dumps())
   shutil.rmtree(finaldestdir, ignore_errors=True)
diff --git a/lib/cli.py b/lib/cli.py
index 00c6dfdc1aff005955a8ed708dd207ef4e847ba2..fb5d519764acc3908368b3c87fe712dd8568b100 100644
--- a/lib/cli.py
+++ b/lib/cli.py
@@ -72,6 +72,7 @@ __all__ = [
   "HVOPTS_OPT",
   "HYPERVISOR_OPT",
   "IALLOCATOR_OPT",
+  "IDENTIFY_DEFAULTS_OPT",
   "IGNORE_CONSIST_OPT",
   "IGNORE_FAILURES_OPT",
   "IGNORE_REMOVE_FAILURES_OPT",
@@ -948,6 +949,13 @@ MAINTAIN_NODE_HEALTH_OPT = \
                " health, by shutting down unknown instances, shutting down"
                " unknown DRBD devices, etc.")
 
+IDENTIFY_DEFAULTS_OPT = \
+    cli_option("--identify-defaults", dest="identify_defaults",
+               default=False, action="store_true",
+               help="Identify which saved instance parameters are equal to"
+               " the current cluster defaults and set them as such, instead"
+               " of marking them as overridden")
+
 
 def _ParseArgs(argv, commands, aliases):
   """Parser for the command line arguments.
@@ -1563,9 +1571,12 @@ def GenericInstanceCreate(mode, opts, args):
   elif opts.no_nics:
     # no nics
     nics = []
-  else:
+  elif mode == constants.INSTANCE_CREATE:
     # default of one nic, all auto
     nics = [{}]
+  else:
+    # mode == import
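+    # (the NIC configuration will be taken from the export information)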
+    nics = []
 
   if opts.disk_template == constants.DT_DISKLESS:
     if opts.disks or opts.sd_size is not None:
@@ -1573,18 +1584,23 @@ def GenericInstanceCreate(mode, opts, args):
                                  " information passed")
     disks = []
   else:
-    if not opts.disks and not opts.sd_size:
+    if (not opts.disks and not opts.sd_size
+        and mode == constants.INSTANCE_CREATE):
       raise errors.OpPrereqError("No disk information specified")
     if opts.disks and opts.sd_size is not None:
       raise errors.OpPrereqError("Please use either the '--disk' or"
                                  " '-s' option")
     if opts.sd_size is not None:
       opts.disks = [(0, {"size": opts.sd_size})]
-    try:
-      disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
-    except ValueError, err:
-      raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
-    disks = [{}] * disk_max
+
+    if opts.disks:
+      try:
+        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
+      except ValueError, err:
+        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
+      disks = [{}] * disk_max
+    else:
+      disks = []
     for didx, ddict in opts.disks:
       didx = int(didx)
       if not isinstance(ddict, dict):
@@ -1618,12 +1634,14 @@ def GenericInstanceCreate(mode, opts, args):
     src_node = None
     src_path = None
     no_install = opts.no_install
+    identify_defaults = False
   elif mode == constants.INSTANCE_IMPORT:
     start = False
     os_type = None
     src_node = opts.src_node
     src_path = opts.src_dir
     no_install = None
+    identify_defaults = opts.identify_defaults
   else:
     raise errors.ProgrammerError("Invalid creation mode %s" % mode)
 
@@ -1646,7 +1664,8 @@ def GenericInstanceCreate(mode, opts, args):
                                 os_type=os_type,
                                 src_node=src_node,
                                 src_path=src_path,
-                                no_install=no_install)
+                                no_install=no_install,
+                                identify_defaults=identify_defaults)
 
   SubmitOrSend(op, opts)
   return 0
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index cf07862d0eec52d24a1ff0ef04d4cad88799edc9..55749cdec66aab290fca0081c8ad3cc7634d571d 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -568,6 +568,9 @@ def _CheckDiskTemplate(template):
     msg = ("Invalid disk template name '%s', valid templates are: %s" %
            (template, utils.CommaJoin(constants.DISK_TEMPLATES)))
     raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
+  if template == constants.DT_FILE and not constants.ENABLE_FILE_STORAGE:
+    raise errors.OpPrereqError("File storage disabled at configure time",
+                               errors.ECODE_INVAL)
 
 
 def _CheckInstanceDown(lu, instance, reason):
@@ -5984,7 +5987,7 @@ class LUCreateInstance(LogicalUnit):
   """
   HPATH = "instance-add"
   HTYPE = constants.HTYPE_INSTANCE
-  _OP_REQP = ["instance_name", "disks", "disk_template",
+  _OP_REQP = ["instance_name", "disks",
               "mode", "start",
               "wait_for_sync", "ip_check", "nics",
               "hvparams", "beparams"]
@@ -5995,7 +5998,8 @@ class LUCreateInstance(LogicalUnit):
 
     """
     # set optional parameters to none if they don't exist
-    for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
+    for attr in ["pnode", "snode", "iallocator", "hypervisor",
+                 "disk_template", "identify_defaults"]:
       if not hasattr(self.op, attr):
         setattr(self.op, attr, None)
 
@@ -6014,10 +6018,6 @@ class LUCreateInstance(LogicalUnit):
       # TODO: make the ip check more flexible and not depend on the name check
       raise errors.OpPrereqError("Cannot do ip checks without a name check",
                                  errors.ECODE_INVAL)
-    if (self.op.disk_template == constants.DT_FILE and
-        not constants.ENABLE_FILE_STORAGE):
-      raise errors.OpPrereqError("File storage disabled at configure time",
-                                 errors.ECODE_INVAL)
     # check disk information: either all adopt, or no adopt
     has_adopt = has_no_adopt = False
     for disk in self.op.disks:
@@ -6026,7 +6026,7 @@ class LUCreateInstance(LogicalUnit):
       else:
         has_no_adopt = True
     if has_adopt and has_no_adopt:
-      raise errors.OpPrereqError("Either all disks have are adoped or none is",
+      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                  errors.ECODE_INVAL)
     if has_adopt:
       if self.op.disk_template != constants.DT_PLAIN:
@@ -6042,162 +6042,21 @@ class LUCreateInstance(LogicalUnit):
 
     self.adopt_disks = has_adopt
 
-  def ExpandNames(self):
-    """ExpandNames for CreateInstance.
-
-    Figure out the right locks for instance creation.
-
-    """
-    self.needed_locks = {}
-
-    # cheap checks, mostly valid constants given
-
     # verify creation mode
     if self.op.mode not in (constants.INSTANCE_CREATE,
                             constants.INSTANCE_IMPORT):
       raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                  self.op.mode, errors.ECODE_INVAL)
 
-    # disk template and mirror node verification
-    _CheckDiskTemplate(self.op.disk_template)
-
-    if self.op.hypervisor is None:
-      self.op.hypervisor = self.cfg.GetHypervisorType()
-
-    cluster = self.cfg.GetClusterInfo()
-    enabled_hvs = cluster.enabled_hypervisors
-    if self.op.hypervisor not in enabled_hvs:
-      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
-                                 " cluster (%s)" % (self.op.hypervisor,
-                                  ",".join(enabled_hvs)),
-                                 errors.ECODE_STATE)
-
-    # check hypervisor parameter syntax (locally)
-    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
-    filled_hvp = objects.FillDict(cluster.hvparams[self.op.hypervisor],
-                                  self.op.hvparams)
-    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
-    hv_type.CheckParameterSyntax(filled_hvp)
-    self.hv_full = filled_hvp
-    # check that we don't specify global parameters on an instance
-    _CheckGlobalHvParams(self.op.hvparams)
-
-    # fill and remember the beparams dict
-    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
-    self.be_full = objects.FillDict(cluster.beparams[constants.PP_DEFAULT],
-                                    self.op.beparams)
-
-    #### instance parameters check
-
     # instance name verification
     if self.op.name_check:
-      hostname1 = utils.GetHostInfo(self.op.instance_name)
-      self.op.instance_name = instance_name = hostname1.name
+      self.hostname1 = utils.GetHostInfo(self.op.instance_name)
+      self.op.instance_name = self.hostname1.name
       # used in CheckPrereq for ip ping check
-      self.check_ip = hostname1.ip
+      self.check_ip = self.hostname1.ip
     else:
-      instance_name = self.op.instance_name
       self.check_ip = None
 
-    # this is just a preventive check, but someone might still add this
-    # instance in the meantime, and creation will fail at lock-add time
-    if instance_name in self.cfg.GetInstanceList():
-      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
-                                 instance_name, errors.ECODE_EXISTS)
-
-    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
-
-    # NIC buildup
-    self.nics = []
-    for idx, nic in enumerate(self.op.nics):
-      nic_mode_req = nic.get("mode", None)
-      nic_mode = nic_mode_req
-      if nic_mode is None:
-        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
-
-      # in routed mode, for the first nic, the default ip is 'auto'
-      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
-        default_ip_mode = constants.VALUE_AUTO
-      else:
-        default_ip_mode = constants.VALUE_NONE
-
-      # ip validity checks
-      ip = nic.get("ip", default_ip_mode)
-      if ip is None or ip.lower() == constants.VALUE_NONE:
-        nic_ip = None
-      elif ip.lower() == constants.VALUE_AUTO:
-        if not self.op.name_check:
-          raise errors.OpPrereqError("IP address set to auto but name checks"
-                                     " have been skipped. Aborting.",
-                                     errors.ECODE_INVAL)
-        nic_ip = hostname1.ip
-      else:
-        if not utils.IsValidIP(ip):
-          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
-                                     " like a valid IP" % ip,
-                                     errors.ECODE_INVAL)
-        nic_ip = ip
-
-      # TODO: check the ip address for uniqueness
-      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
-        raise errors.OpPrereqError("Routed nic mode requires an ip address",
-                                   errors.ECODE_INVAL)
-
-      # MAC address verification
-      mac = nic.get("mac", constants.VALUE_AUTO)
-      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
-        mac = utils.NormalizeAndValidateMac(mac)
-
-        try:
-          self.cfg.ReserveMAC(mac, self.proc.GetECId())
-        except errors.ReservationError:
-          raise errors.OpPrereqError("MAC address %s already in use"
-                                     " in cluster" % mac,
-                                     errors.ECODE_NOTUNIQUE)
-
-      # bridge verification
-      bridge = nic.get("bridge", None)
-      link = nic.get("link", None)
-      if bridge and link:
-        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
-                                   " at the same time", errors.ECODE_INVAL)
-      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
-        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic",
-                                   errors.ECODE_INVAL)
-      elif bridge:
-        link = bridge
-
-      nicparams = {}
-      if nic_mode_req:
-        nicparams[constants.NIC_MODE] = nic_mode_req
-      if link:
-        nicparams[constants.NIC_LINK] = link
-
-      check_params = objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
-                                      nicparams)
-      objects.NIC.CheckParameterSyntax(check_params)
-      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
-
-    # disk checks/pre-build
-    self.disks = []
-    for disk in self.op.disks:
-      mode = disk.get("mode", constants.DISK_RDWR)
-      if mode not in constants.DISK_ACCESS_SET:
-        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
-                                   mode, errors.ECODE_INVAL)
-      size = disk.get("size", None)
-      if size is None:
-        raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
-      try:
-        size = int(size)
-      except (TypeError, ValueError):
-        raise errors.OpPrereqError("Invalid disk size '%s'" % size,
-                                   errors.ECODE_INVAL)
-      new_disk = {"size": size, "mode": mode}
-      if "adopt" in disk:
-        new_disk["adopt"] = disk["adopt"]
-      self.disks.append(new_disk)
-
     # file storage checks
     if (self.op.file_driver and
         not self.op.file_driver in constants.FILE_DRIVER):
@@ -6214,6 +6073,41 @@ class LUCreateInstance(LogicalUnit):
                                  " node must be given",
                                  errors.ECODE_INVAL)
 
+    if self.op.mode == constants.INSTANCE_IMPORT:
+      # On import force_variant must be True, because if we forced it at
+      # initial install, our only chance when importing it back is that it
+      # works again!
+      self.op.force_variant = True
+
+      if self.op.no_install:
+        self.LogInfo("No-installation mode has no effect during import")
+
+    else: # INSTANCE_CREATE
+      if getattr(self.op, "os_type", None) is None:
+        raise errors.OpPrereqError("No guest OS specified",
+                                   errors.ECODE_INVAL)
+      self.op.force_variant = getattr(self.op, "force_variant", False)
+      if self.op.disk_template is None:
+        raise errors.OpPrereqError("No disk template specified",
+                                   errors.ECODE_INVAL)
+
+  def ExpandNames(self):
+    """ExpandNames for CreateInstance.
+
+    Figure out the right locks for instance creation.
+
+    """
+    self.needed_locks = {}
+
+    instance_name = self.op.instance_name
+    # this is just a preventive check, but someone might still add this
+    # instance in the meantime, and creation will fail at lock-add time
+    if instance_name in self.cfg.GetInstanceList():
+      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
+                                 instance_name, errors.ECODE_EXISTS)
+
+    self.add_locks[locking.LEVEL_INSTANCE] = instance_name
+
     if self.op.iallocator:
       self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
     else:
@@ -6247,20 +6141,6 @@ class LUCreateInstance(LogicalUnit):
           self.op.src_path = src_path = \
             utils.PathJoin(constants.EXPORT_DIR, src_path)
 
-      # On import force_variant must be True, because if we forced it at
-      # initial install, our only chance when importing it back is that it
-      # works again!
-      self.op.force_variant = True
-
-      if self.op.no_install:
-        self.LogInfo("No-installation mode has no effect during import")
-
-    else: # INSTANCE_CREATE
-      if getattr(self.op, "os_type", None) is None:
-        raise errors.OpPrereqError("No guest OS specified",
-                                   errors.ECODE_INVAL)
-      self.op.force_variant = getattr(self.op, "force_variant", False)
-
   def _RunAllocator(self):
     """Run the allocator based on input opcode.
 
@@ -6332,50 +6212,278 @@ class LUCreateInstance(LogicalUnit):
           self.secondaries)
     return env, nl, nl
 
+  def _ReadExportInfo(self):
+    """Reads the export information from disk.
+
+    It will override the opcode source node and path with the actual
+    information, if these two were not specified before.
+
+    @return: the export information
+
+    """
+    assert self.op.mode == constants.INSTANCE_IMPORT
+
+    src_node = self.op.src_node
+    src_path = self.op.src_path
+
+    if src_node is None:
+      locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
+      exp_list = self.rpc.call_export_list(locked_nodes)
+      found = False
+      for node in exp_list:
+        if exp_list[node].fail_msg:
+          continue
+        if src_path in exp_list[node].payload:
+          found = True
+          self.op.src_node = src_node = node
+          self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
+                                                       src_path)
+          break
+      if not found:
+        raise errors.OpPrereqError("No export found for relative path %s" %
+                                    src_path, errors.ECODE_INVAL)
+
+    _CheckNodeOnline(self, src_node)
+    result = self.rpc.call_export_info(src_node, src_path)
+    result.Raise("No export or invalid export found in dir %s" % src_path)
+
+    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
+    if not export_info.has_section(constants.INISECT_EXP):
+      raise errors.ProgrammerError("Corrupted export config",
+                                   errors.ECODE_ENVIRON)
+
+    ei_version = export_info.get(constants.INISECT_EXP, "version")
+    if (int(ei_version) != constants.EXPORT_VERSION):
+      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
+                                 (ei_version, constants.EXPORT_VERSION),
+                                 errors.ECODE_ENVIRON)
+    return export_info
+
+  def _ReadExportParams(self, einfo):
+    """Use export parameters as defaults.
+
+    If the opcode doesn't specify (i.e. override) some instance
+    parameters, try to read them from the export information, if the
+    export declares them.
+
+    """
+    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
+
+    if self.op.disk_template is None:
+      if einfo.has_option(constants.INISECT_INS, "disk_template"):
+        self.op.disk_template = einfo.get(constants.INISECT_INS,
+                                          "disk_template")
+      else:
+        raise errors.OpPrereqError("No disk template specified and the export"
+                                   " is missing the disk_template information",
+                                   errors.ECODE_INVAL)
+
+    if not self.op.disks:
+      if einfo.has_option(constants.INISECT_INS, "disk_count"):
+        disks = []
+        # TODO: import the disk iv_name too
+        for idx in range(einfo.getint(constants.INISECT_INS, "disk_count")):
+          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
+          disks.append({"size": disk_sz})
+        self.op.disks = disks
+      else:
+        raise errors.OpPrereqError("No disk info specified and the export"
+                                   " is missing the disk information",
+                                   errors.ECODE_INVAL)
+
+    if (not self.op.nics and
+        einfo.has_option(constants.INISECT_INS, "nic_count")):
+      nics = []
+      for idx in range(einfo.getint(constants.INISECT_INS, "nic_count")):
+        ndict = {}
+        for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
+          v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
+          ndict[name] = v
+        nics.append(ndict)
+      self.op.nics = nics
+
+    if (self.op.hypervisor is None and
+        einfo.has_option(constants.INISECT_INS, "hypervisor")):
+      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
+    if einfo.has_section(constants.INISECT_HYP):
+      # use the export parameters but do not override the ones
+      # specified by the user
+      for name, value in einfo.items(constants.INISECT_HYP):
+        if name not in self.op.hvparams:
+          self.op.hvparams[name] = value
+
+    if einfo.has_section(constants.INISECT_BEP):
+      # use the parameters, without overriding
+      for name, value in einfo.items(constants.INISECT_BEP):
+        if name not in self.op.beparams:
+          self.op.beparams[name] = value
+    else:
+      # try to read the parameters old style, from the main section
+      for name in constants.BES_PARAMETERS:
+        if (name not in self.op.beparams and
+            einfo.has_option(constants.INISECT_INS, name)):
+          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
+
+  def _RevertToDefaults(self, cluster):
+    """Revert the instance parameters to the default values.
+
+    """
+    # hvparams
+    hv_defs = cluster.GetHVDefaults(self.op.hypervisor, self.op.os_type)
+    for name in self.op.hvparams.keys():
+      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
+        del self.op.hvparams[name]
+    # beparams
+    be_defs = cluster.beparams.get(constants.PP_DEFAULT, {})
+    for name in self.op.beparams.keys():
+      if name in be_defs and be_defs[name] == self.op.beparams[name]:
+        del self.op.beparams[name]
+    # nic params
+    nic_defs = cluster.nicparams.get(constants.PP_DEFAULT, {})
+    for nic in self.op.nics:
+      for name in constants.NICS_PARAMETERS:
+        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
+          del nic[name]
+
   def CheckPrereq(self):
     """Check prerequisites.
 
     """
+    if self.op.mode == constants.INSTANCE_IMPORT:
+      export_info = self._ReadExportInfo()
+      self._ReadExportParams(export_info)
+
+    _CheckDiskTemplate(self.op.disk_template)
+
     if (not self.cfg.GetVGName() and
         self.op.disk_template not in constants.DTS_NOT_LVM):
       raise errors.OpPrereqError("Cluster does not support lvm-based"
                                  " instances", errors.ECODE_STATE)
 
-    if self.op.mode == constants.INSTANCE_IMPORT:
-      src_node = self.op.src_node
-      src_path = self.op.src_path
+    if self.op.hypervisor is None:
+      self.op.hypervisor = self.cfg.GetHypervisorType()
 
-      if src_node is None:
-        locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
-        exp_list = self.rpc.call_export_list(locked_nodes)
-        found = False
-        for node in exp_list:
-          if exp_list[node].fail_msg:
-            continue
-          if src_path in exp_list[node].payload:
-            found = True
-            self.op.src_node = src_node = node
-            self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
-                                                         src_path)
-            break
-        if not found:
-          raise errors.OpPrereqError("No export found for relative path %s" %
-                                      src_path, errors.ECODE_INVAL)
-
-      _CheckNodeOnline(self, src_node)
-      result = self.rpc.call_export_info(src_node, src_path)
-      result.Raise("No export or invalid export found in dir %s" % src_path)
-
-      export_info = objects.SerializableConfigParser.Loads(str(result.payload))
-      if not export_info.has_section(constants.INISECT_EXP):
-        raise errors.ProgrammerError("Corrupted export config",
-                                     errors.ECODE_ENVIRON)
-
-      ei_version = export_info.get(constants.INISECT_EXP, 'version')
-      if (int(ei_version) != constants.EXPORT_VERSION):
-        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
-                                   (ei_version, constants.EXPORT_VERSION),
-                                   errors.ECODE_ENVIRON)
+    cluster = self.cfg.GetClusterInfo()
+    enabled_hvs = cluster.enabled_hypervisors
+    if self.op.hypervisor not in enabled_hvs:
+      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
+                                 " cluster (%s)" % (self.op.hypervisor,
+                                  ",".join(enabled_hvs)),
+                                 errors.ECODE_STATE)
+
+    # check hypervisor parameter syntax (locally)
+    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
+    filled_hvp = objects.FillDict(cluster.GetHVDefaults(self.op.hypervisor,
+                                                        self.op.os_type),
+                                  self.op.hvparams)
+    hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
+    hv_type.CheckParameterSyntax(filled_hvp)
+    self.hv_full = filled_hvp
+    # check that we don't specify global parameters on an instance
+    _CheckGlobalHvParams(self.op.hvparams)
+
+    # fill and remember the beparams dict
+    utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
+    self.be_full = objects.FillDict(cluster.beparams[constants.PP_DEFAULT],
+                                    self.op.beparams)
+
+    # now that hvp/bep are in final format, let's reset to defaults,
+    # if told to do so
+    if self.op.identify_defaults:
+      self._RevertToDefaults(cluster)
+
+    # NIC buildup
+    self.nics = []
+    for idx, nic in enumerate(self.op.nics):
+      nic_mode_req = nic.get("mode", None)
+      nic_mode = nic_mode_req
+      if nic_mode is None:
+        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
+
+      # in routed mode, for the first nic, the default ip is 'auto'
+      if nic_mode == constants.NIC_MODE_ROUTED and idx == 0:
+        default_ip_mode = constants.VALUE_AUTO
+      else:
+        default_ip_mode = constants.VALUE_NONE
+
+      # ip validity checks
+      ip = nic.get("ip", default_ip_mode)
+      if ip is None or ip.lower() == constants.VALUE_NONE:
+        nic_ip = None
+      elif ip.lower() == constants.VALUE_AUTO:
+        if not self.op.name_check:
+          raise errors.OpPrereqError("IP address set to auto but name checks"
+                                     " have been skipped. Aborting.",
+                                     errors.ECODE_INVAL)
+        nic_ip = self.hostname1.ip
+      else:
+        if not utils.IsValidIP(ip):
+          raise errors.OpPrereqError("Given IP address '%s' doesn't look"
+                                     " like a valid IP" % ip,
+                                     errors.ECODE_INVAL)
+        nic_ip = ip
+
+      # TODO: check the ip address for uniqueness
+      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
+        raise errors.OpPrereqError("Routed nic mode requires an ip address",
+                                   errors.ECODE_INVAL)
+
+      # MAC address verification
+      mac = nic.get("mac", constants.VALUE_AUTO)
+      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
+        mac = utils.NormalizeAndValidateMac(mac)
+
+        try:
+          self.cfg.ReserveMAC(mac, self.proc.GetECId())
+        except errors.ReservationError:
+          raise errors.OpPrereqError("MAC address %s already in use"
+                                     " in cluster" % mac,
+                                     errors.ECODE_NOTUNIQUE)
+
+      # bridge verification
+      bridge = nic.get("bridge", None)
+      link = nic.get("link", None)
+      if bridge and link:
+        raise errors.OpPrereqError("Cannot pass 'bridge' and 'link'"
+                                   " at the same time", errors.ECODE_INVAL)
+      elif bridge and nic_mode == constants.NIC_MODE_ROUTED:
+        raise errors.OpPrereqError("Cannot pass 'bridge' on a routed nic",
+                                   errors.ECODE_INVAL)
+      elif bridge:
+        link = bridge
+
+      nicparams = {}
+      if nic_mode_req:
+        nicparams[constants.NIC_MODE] = nic_mode_req
+      if link:
+        nicparams[constants.NIC_LINK] = link
+
+      check_params = objects.FillDict(cluster.nicparams[constants.PP_DEFAULT],
+                                      nicparams)
+      objects.NIC.CheckParameterSyntax(check_params)
+      self.nics.append(objects.NIC(mac=mac, ip=nic_ip, nicparams=nicparams))
+
+    # disk checks/pre-build
+    self.disks = []
+    for disk in self.op.disks:
+      mode = disk.get("mode", constants.DISK_RDWR)
+      if mode not in constants.DISK_ACCESS_SET:
+        raise errors.OpPrereqError("Invalid disk access mode '%s'" %
+                                   mode, errors.ECODE_INVAL)
+      size = disk.get("size", None)
+      if size is None:
+        raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
+      try:
+        size = int(size)
+      except (TypeError, ValueError):
+        raise errors.OpPrereqError("Invalid disk size '%s'" % size,
+                                   errors.ECODE_INVAL)
+      new_disk = {"size": size, "mode": mode}
+      if "adopt" in disk:
+        new_disk["adopt"] = disk["adopt"]
+      self.disks.append(new_disk)
+
+    if self.op.mode == constants.INSTANCE_IMPORT:
 
       # Check that the new instance doesn't have less disks than the export
       instance_disks = len(self.disks)
@@ -6386,14 +6494,13 @@ class LUCreateInstance(LogicalUnit):
                                    (instance_disks, export_disks),
                                    errors.ECODE_INVAL)
 
-      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
       disk_images = []
       for idx in range(export_disks):
         option = 'disk%d_dump' % idx
         if export_info.has_option(constants.INISECT_INS, option):
           # FIXME: are the old os-es, disk sizes, etc. useful?
           export_name = export_info.get(constants.INISECT_INS, option)
-          image = utils.PathJoin(src_path, export_name)
+          image = utils.PathJoin(self.op.src_path, export_name)
           disk_images.append(image)
         else:
           disk_images.append(False)
@@ -6401,8 +6508,12 @@ class LUCreateInstance(LogicalUnit):
       self.src_images = disk_images
 
       old_name = export_info.get(constants.INISECT_INS, 'name')
-      # FIXME: int() here could throw a ValueError on broken exports
-      exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
+      try:
+        exp_nic_count = export_info.getint(constants.INISECT_INS, 'nic_count')
+      except (TypeError, ValueError), err:
+        raise errors.OpPrereqError("Invalid export file, nic_count is not"
+                                   " an integer: %s" % str(err),
+                                   errors.ECODE_STATE)
       if self.op.instance_name == old_name:
         for idx, nic in enumerate(self.nics):
           if nic.mac == constants.VALUE_AUTO and exp_nic_count >= idx:
diff --git a/lib/constants.py b/lib/constants.py
index 8029028d70fd0f3fc87944d81e2de34553ac26d6..4d0179b2441a0a95fbda8e693a33eb9f15db52e2 100644
--- a/lib/constants.py
+++ b/lib/constants.py
@@ -319,6 +319,8 @@ FILE_DRIVER = frozenset([FD_LOOP, FD_BLKTAP])
 # import/export config options
 INISECT_EXP = "export"
 INISECT_INS = "instance"
+INISECT_HYP = "hypervisor"
+INISECT_BEP = "backend"
 
 # dynamic device modification
 
diff --git a/lib/objects.py b/lib/objects.py
index 79300c1d042f2acd8413a2d8a516399364e7cee2..f03cb9fb67c66c7734e1aac71aa2edfcf77d965e 100644
--- a/lib/objects.py
+++ b/lib/objects.py
@@ -934,6 +934,30 @@ class Cluster(TaggableObject):
       obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
     return obj
 
+  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
+    """Get the default hypervisor parameters for the cluster.
+
+    @param hypervisor: the hypervisor name
+    @param os_name: if specified, the OS-specific defaults will also be used
+    @param skip_keys: if passed, list of keys not to use
+    @return: the defaults dict
+
+    """
+    if skip_keys is None:
+      skip_keys = []
+
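+    # the defaults are filled from least to most specific: OS-specific
+    # hypervisor parameters override the cluster-wide ones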
+    fill_stack = [self.hvparams.get(hypervisor, {})]
+    if os_name is not None:
+      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
+      fill_stack.append(os_hvp)
+
+    ret_dict = {}
+    for o_dict in fill_stack:
+      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
+
+    return ret_dict
+
+
   def FillHV(self, instance, skip_globals=False):
     """Fill an instance's hvparams dict.
 
@@ -952,18 +976,9 @@ class Cluster(TaggableObject):
     else:
       skip_keys = []
 
-    # We fill the list from least to most important override
-    fill_stack = [
-      self.hvparams.get(instance.hypervisor, {}),
-      self.os_hvp.get(instance.os, {}).get(instance.hypervisor, {}),
-      instance.hvparams,
-      ]
-
-    ret_dict = {}
-    for o_dict in fill_stack:
-      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
-
-    return ret_dict
+    def_dict = self.GetHVDefaults(instance.hypervisor, instance.os,
+                                  skip_keys=skip_keys)
+    return FillDict(def_dict, instance.hvparams, skip_keys=skip_keys)
 
   def FillBE(self, instance):
     """Fill an instance's beparams dict.
diff --git a/lib/opcodes.py b/lib/opcodes.py
index b97c01dbc062953dff6be67a7193363f1c4e3464..42d0505f5516af3a82fcead4686ae096e9b21cc5 100644
--- a/lib/opcodes.py
+++ b/lib/opcodes.py
@@ -465,7 +465,7 @@ class OpCreateInstance(OpCode):
     "os_type", "force_variant", "no_install",
     "pnode", "disk_template", "snode", "mode",
     "disks", "nics",
-    "src_node", "src_path", "start",
+    "src_node", "src_path", "start", "identify_defaults",
     "wait_for_sync", "ip_check", "name_check",
     "file_storage_dir", "file_driver",
     "iallocator",
diff --git a/man/gnt-backup.sgml b/man/gnt-backup.sgml
index ff6c3cee9a45dbf02b86ac84642ce5ecfa86d4d2..b8764fe4244776d09ec8a95b3d129b59dfe1a06f 100644
--- a/man/gnt-backup.sgml
+++ b/man/gnt-backup.sgml
@@ -21,6 +21,7 @@
       <year>2007</year>
       <year>2008</year>
       <year>2009</year>
+      <year>2010</year>
       <holder>Google Inc.</holder>
     </copyright>
     &dhdate;
@@ -29,7 +30,7 @@
     &dhucpackage;
 
     &dhsection;
-    <refmiscinfo>ganeti 2.0</refmiscinfo>
+    <refmiscinfo>ganeti 2.1</refmiscinfo>
   </refmeta>
   <refnamediv>
     <refname>&dhpackage;</refname>
@@ -143,7 +144,7 @@
         <arg>--src-dir=<replaceable>source-dir</replaceable></arg>
         <sbr>
 
-        <arg choice="req">-t<group>
+        <arg choice="opt">-t<group>
             <arg>diskless</arg>
             <arg>plain</arg>
             <arg>drbd</arg>
@@ -151,8 +152,12 @@
           </group></arg>
         <sbr>
 
+        <arg choice="opt">--identify-defaults</arg>
+        <sbr>
+
         <arg choice="req"><replaceable>instance</replaceable></arg>
       </cmdsynopsis>
+
       <para>
         Imports a new instance from an export residing on
         <replaceable>source-node</replaceable> in
@@ -167,11 +172,11 @@
       <para>
         The <option>disk</option> option specifies the parameters for
         the disks of the instance. The numbering of disks starts at
-        zero, and at least one disk needs to be passed. For each disk,
-        at least the size needs to be given, and optionally the access
-        mode (read-only or the default of read-write) can also be
-        specified.  The size is interpreted (when no unit is given) in
-        mebibytes. You can also use one of the suffixes
+        zero. For each disk, at least the size needs to be given, and
+        optionally the access mode (read-only or the default of
+        read-write) can also be specified.  The size is interpreted
+        (when no unit is given) in mebibytes. You can also use one of
+        the suffixes
         <literal>m</literal>, <literal>g</literal> or
         <literal>t</literal> to specificy the exact the units used;
         these suffixes map to mebibytes, gibibytes and tebibytes.
@@ -185,7 +190,13 @@
       </para>
 
       <para>
-        The minimum disk specification is therefore
+        If no disk information is passed, the disk configuration saved
+        at export time will be used.
+      </para>
+
+      <para>
+        The minimum disk specification is therefore empty (the export
+        information will be used); a single disk can be specified as
         <userinput>--disk 0:size=20G</userinput> (or <userinput>-s
           20G</userinput> when using the <option>-s</option> option),
         and a three-disk instance can be specified as
@@ -195,10 +206,10 @@
 
       <para>
         The NICs of the instances can be specified via the
-        <option>--net</option> option. By default, one NIC is created
-        for the instance, with the MAC set to the original MAC of the
-        instance (as it was at export time). Each NIC can take up to
-        three parameters (all optional):
+        <option>--net</option> option. By default, the NIC
+        configuration of the original (exported) instance will be
+        reused. Each NIC can take up to three parameters (all
+        optional):
         <variablelist>
           <varlistentry>
             <term>mac</term>
@@ -240,15 +251,15 @@
       </para>
 
       <para>
-        Alternatively, if no network is desired for the instance, you
-        can prevent the default of one NIC with the
-        <option>--no-nics</option> option.
+        If no network is desired for the instance, you should create a
+        single empty NIC and delete it afterwards
+        via <command>gnt-instance modify --net remove</command>.
       </para>
 
       <para>
         The <option>-B</option> option specifies the backend
         parameters for the instance. If no such parameters are
-        specified, the values are inherited from the cluster. Possible
+        specified, the values are inherited from the export. Possible
         parameters are:
         <variablelist>
           <varlistentry>
@@ -278,8 +289,9 @@
       </para>
 
       <para>
-        The <option>-t</option> options specifies the disk layout type for
-        the instance. The available choices are:
+        The <option>-t</option> option specifies the disk layout type
+        for the instance. If not passed, the configuration of the
+        original instance is used. The available choices are:
         <variablelist>
           <varlistentry>
             <term>diskless</term>
@@ -334,13 +346,26 @@
       </para>
 
       <para>
-        If you do not want gnt-backup to wait for the disk mirror
-        to be synced, use the <option>--no-wait-for-sync</option>
-        option.
+        Since many of the parameters are by default read from the
+        exported instance information and used as explicit values, the
+        new instance will have all of its parameters explicitly
+        specified, unlike a newly added instance, which takes most of
+        its parameters from the cluster defaults. To make the import
+        recognize parameters whose saved value matches the current
+        cluster default and mark them as defaults instead, pass the
+        <option>--identify-defaults</option> option. This affects the
+        hypervisor, backend and NIC parameters, both those read from
+        the export file and those passed in via the command line.
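+        For example, to import an instance and have the parameters that
+        match the current cluster defaults recorded as defaults:
+        <screen>
+# gnt-backup import --identify-defaults -n node1.example.com \
+> instance3.example.com
+        </screen>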
       </para>
 
       <para>
-        Example:
+        Example for identical instance import:
+        <screen>
+# gnt-backup import -n node1.example.com instance3.example.com
+        </screen>
+      </para>
+      <para>
+        Explicit configuration example:
         <screen>
 # gnt-backup import -t plain --disk 0:size=1G -B memory=512 \
 > -n node1.example.com \
diff --git a/qa/qa-sample.json b/qa/qa-sample.json
index 8f6f4db886f8a9673adca5fea688ed1bf77b4a80..ae09588c02ce7a3c481d03e64d64d8ad816f1400 100644
--- a/qa/qa-sample.json
+++ b/qa/qa-sample.json
@@ -6,6 +6,9 @@
   "os": "debian-etch",
   "mem": "512M",
 
+  "rapi-user": "foobar",
+  "rapi-pass": "barfoo",
+
   "# Lists of disk sizes": null,
   "disk": ["1G", "512M"],
   "disk-growth": ["2G", "768M"],
diff --git a/qa/qa_cluster.py b/qa/qa_cluster.py
index 65c975126d7d5696a5c801a9bef89fb484a0a863..384ca0e08df2f4ad98fd8d71c992da8e73bf61b1 100644
--- a/qa/qa_cluster.py
+++ b/qa/qa_cluster.py
@@ -78,6 +78,21 @@ def TestClusterInit():
   AssertEqual(StartSSH(master['primary'],
                        utils.ShellQuoteArgs(cmd)).wait(), 0)
 
+  # Create RAPI credentials
+  rapi_user = qa_config.get("rapi-user", default=None)
+  rapi_pass = qa_config.get("rapi-pass", default=None)
+
+  if rapi_user and rapi_pass:
+    cmds = []
+
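+    # RAPI users file entries have the form "username password options";
+    # the "write" option grants read-write access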
+    cred_string = "%s %s write" % (rapi_user, rapi_pass)
+    cmds.append(("echo %s >> %s" %
+                 (utils.ShellQuote(cred_string),
+                  utils.ShellQuote(constants.RAPI_USERS_FILE))))
+    cmds.append("%s stop-master" % constants.DAEMON_UTIL)
+    cmds.append("%s start-master" % constants.DAEMON_UTIL)
+    AssertEqual(StartSSH(master['primary'], ' && '.join(cmds)).wait(), 0)
+
 
 def TestClusterRename():
   """gnt-cluster rename"""
diff --git a/scripts/gnt-backup b/scripts/gnt-backup
index 612cb21bc058e94ec693552ce48e4b220db08fbe..b47d7587602ce01b7051c853cbf43597637adb3f 100755
--- a/scripts/gnt-backup
+++ b/scripts/gnt-backup
@@ -130,22 +130,23 @@ def RemoveExport(opts, args):
 
 # this is defined separately due to readability only
 import_opts = [
-  NODE_PLACEMENT_OPT,
   BACKEND_OPT,
-  DISK_TEMPLATE_OPT,
   DISK_OPT,
-  OS_SIZE_OPT,
+  DISK_TEMPLATE_OPT,
+  FILESTORE_DIR_OPT,
+  FILESTORE_DRIVER_OPT,
+  HYPERVISOR_OPT,
+  IALLOCATOR_OPT,
+  IDENTIFY_DEFAULTS_OPT,
   NET_OPT,
+  NODE_PLACEMENT_OPT,
+  NOIPCHECK_OPT,
+  NONAMECHECK_OPT,
   NONICS_OPT,
   NWSYNC_OPT,
+  OS_SIZE_OPT,
   SRC_DIR_OPT,
   SRC_NODE_OPT,
-  NOIPCHECK_OPT,
-  NONAMECHECK_OPT,
-  IALLOCATOR_OPT,
-  FILESTORE_DIR_OPT,
-  FILESTORE_DRIVER_OPT,
-  HYPERVISOR_OPT,
   SUBMIT_OPT,
   ]
 
diff --git a/test/ganeti.objects_unittest.py b/test/ganeti.objects_unittest.py
index 5acddf2bdeface1f7a6386349dbfc0629fa26af2..6b5cfe716a3988c000b9d1dd1768626f768b226c 100755
--- a/test/ganeti.objects_unittest.py
+++ b/test/ganeti.objects_unittest.py
@@ -79,6 +79,16 @@ class TestClusterObject(unittest.TestCase):
     self.fake_cl = objects.Cluster(hvparams=hvparams, os_hvp=os_hvp)
     self.fake_cl.UpgradeConfig()
 
+  def testGetHVDefaults(self):
+    cl = self.fake_cl
+    self.failUnlessEqual(cl.GetHVDefaults(constants.HT_FAKE),
+                         cl.hvparams[constants.HT_FAKE])
+    self.failUnlessEqual(cl.GetHVDefaults(None), {})
+    self.failUnlessEqual(cl.GetHVDefaults(constants.HT_XEN_PVM,
+                                          os_name="lenny-image"),
+                         cl.os_hvp["lenny-image"][constants.HT_XEN_PVM])
+
+
   def testFillHvFullMerge(self):
     inst_hvparams = {
       "blah": "blubb",