diff --git a/.gitignore b/.gitignore
index 091a89223e5289de90d0d6ccdd1cbac61e0f9abe..02799083967f1b9c7c67c5a54b154382e5257ceb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -102,6 +102,7 @@
 /tools/kvm-ifup
 /tools/burnin
 /tools/ensure-dirs
+/tools/users-setup
 /tools/vcluster-setup
 /tools/node-cleanup
 /tools/node-daemon-setup
diff --git a/Makefile.am b/Makefile.am
index e61b45bd4f530723a35c3863ef3154f6bfc9d552..45a002427b83c66352a23f02fe0ad14dd4f70481 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -190,6 +190,7 @@ CLEANFILES = \
 	$(man_MANS) \
 	$(manhtml) \
 	tools/kvm-ifup \
+	tools/users-setup \
 	tools/vcluster-setup \
 	stamp-directories \
 	stamp-srclinks \
@@ -856,6 +857,7 @@ python_scripts = \
 
 dist_tools_SCRIPTS = \
 	$(python_scripts) \
+	tools/burnin \
 	tools/kvm-console-wrapper \
 	tools/master-ip-setup \
 	tools/xen-console-wrapper
@@ -865,6 +867,7 @@ nodist_tools_python_scripts = \
 
 nodist_tools_SCRIPTS = \
 	$(nodist_tools_python_scripts) \
+	tools/users-setup \
 	tools/vcluster-setup
 
 pkglib_python_scripts = \
@@ -872,7 +875,6 @@ pkglib_python_scripts = \
 	tools/check-cert-expired
 
 nodist_pkglib_python_scripts = \
-	tools/burnin \
 	tools/ensure-dirs \
 	tools/node-daemon-setup \
 	tools/prepare-node-join
@@ -915,6 +917,7 @@ EXTRA_DIST = \
 	devel/upload \
 	devel/webserver \
 	tools/kvm-ifup.in \
+	tools/users-setup.in \
 	tools/vcluster-setup.in \
 	$(docinput) \
 	doc/html \
@@ -990,6 +993,8 @@ TEST_FILES = \
 	test/data/htools/common-suffix.data \
 	test/data/htools/empty-cluster.data \
 	test/data/htools/hail-alloc-drbd.json \
+	test/data/htools/hail-alloc-invalid-twodisks.json \
+	test/data/htools/hail-alloc-twodisks.json \
 	test/data/htools/hail-change-group.json \
 	test/data/htools/hail-invalid-reloc.json \
 	test/data/htools/hail-node-evac.json \
@@ -1294,6 +1299,10 @@ tools/kvm-ifup: tools/kvm-ifup.in $(REPLACE_VARS_SED)
 	sed -f $(REPLACE_VARS_SED) < $< > $@
 	chmod +x $@
 
+tools/users-setup: tools/users-setup.in $(REPLACE_VARS_SED)
+	sed -f $(REPLACE_VARS_SED) < $< > $@
+	chmod +x $@
+
 tools/vcluster-setup: tools/vcluster-setup.in $(REPLACE_VARS_SED)
 	sed -f $(REPLACE_VARS_SED) < $< > $@
 	chmod +x $@
diff --git a/NEWS b/NEWS
index 7f8358c383e420670929db5d361c54da9f773f16..b8ccc6092a08bcbaef39b7d560e8e4b7e7f7749f 100644
--- a/NEWS
+++ b/NEWS
@@ -24,6 +24,53 @@ Version 2.8.0 beta1
   configuration back to the previous stable version.
 
 
+Version 2.7.0 rc1
+-----------------
+
+*(unreleased)*
+
+- Fix hail to verify disk instance policies on a per-disk basis (Issue 418).
+
+
+Version 2.7.0 beta2
+-------------------
+
+*(Released Tue, 2 Apr 2013)*
+
+- Networks no longer have a "type" slot, since this information was
+  unused in Ganeti: instead of it tags should be used.
+- Diskless instances are now externally mirrored (Issue 237). This for
+  now has only been tested in conjunction with explicit target nodes for
+  migration/failover.
+- The rapi client now has a ``target_node`` option to MigrateInstance.
+- Fix early exit return code for hbal (Issue 386).
+- Fix ``gnt-instance migrate/failover -n`` (Issue 396).
+- Fix ``rbd showmapped`` output parsing (Issue 312).
+- Networks are now referenced indexed by UUID, rather than name. This
+  will require running cfgupgrade, from 2.7.0beta1, if networks are in
+  use.
+- The OS environment now includes network information.
+- Deleting of a network is now disallowed if any instance nic is using
+  it, to prevent dangling references.
+- External storage is now documented in man pages.
+- The exclusive_storage flag can now only be set at nodegroup level.
+- Hbal can now submit an explicit priority with its jobs.
+- Many network related locking fixes.
+- Bump up the required pylint version to 0.25.1.
+- Fix the ``no_remember`` option in RAPI client.
+- Many ipolicy related tests, qa, and fixes.
+- Many documentation improvements and fixes.
+- Fix building with ``--disable-file-storage``.
+- Fix ``-q`` option in htools, which was broken if passed more than
+  once.
+- Some haskell/python interaction improvements and fixes.
+- Fix iallocator in case of missing LVM storage.
+- Fix confd config load in case of ``--no-lvm-storage``.
+- The confd/query functionality is now mentioned in the security
+  documentation.
+
+
+
 Version 2.7.0 beta1
 -------------------
 
diff --git a/UPGRADE b/UPGRADE
index a1d21e764cacb5ff57b4c13fe9db84c550709be7..c3b3de51a9ece1b3895b25fbbcaa9569b2b37817 100644
--- a/UPGRADE
+++ b/UPGRADE
@@ -51,6 +51,10 @@ To run commands on all nodes, the `distributed shell (dsh)
    (``cfgupgrade`` supports a number of parameters, run it with
    ``--help`` for more information)
 
+#. Upgrade the directory permissions on all nodes::
+
+    $ /usr/lib/ganeti/ensure-dirs --full-run
+
 #. Restart daemons on all nodes::
 
     $ /etc/init.d/ganeti restart
diff --git a/autotools/check-news b/autotools/check-news
index 146faf2ea6229bb73b62b8f39eefd57c0e7f894d..cb5ac9a410e5f5ebf8eac4d2e69c12ec7020815e 100755
--- a/autotools/check-news
+++ b/autotools/check-news
@@ -113,6 +113,8 @@ def main():
       m = RELEASED_RE.match(line)
       if not m:
         Error("Line %s: Invalid release line" % fileinput.filelineno())
+        expect_date = False
+        continue
 
       # Including the weekday in the date string does not work as time.strptime
       # would return an inconsistent result if the weekday is incorrect.
diff --git a/configure.ac b/configure.ac
index f7193afd9803453ab2cd63d7008e867713342cb0..b2ae6189ad9557c2e5edf24815b97220fc4b0173 100644
--- a/configure.ac
+++ b/configure.ac
@@ -2,7 +2,7 @@
 m4_define([gnt_version_major], [2])
 m4_define([gnt_version_minor], [7])
 m4_define([gnt_version_revision], [0])
-m4_define([gnt_version_suffix], [~beta1])
+m4_define([gnt_version_suffix], [~beta2])
 m4_define([gnt_version_full],
           m4_format([%d.%d.%d%s],
                     gnt_version_major, gnt_version_minor,
diff --git a/lib/backend.py b/lib/backend.py
index 17d746b2cbb99eef2c21b60d07feefeb9394cea4..3668d4f64578d727ce9e715b5335d14ba31d3007 100644
--- a/lib/backend.py
+++ b/lib/backend.py
@@ -3020,7 +3020,7 @@ def JobQueueUpdate(file_name, content):
 
   # Write and replace the file atomically
   utils.WriteFile(file_name, data=_Decompress(content), uid=getents.masterd_uid,
-                  gid=getents.masterd_gid)
+                  gid=getents.daemons_gid, mode=constants.JOB_QUEUE_FILES_PERMS)
 
 
 def JobQueueRename(old, new):
@@ -3044,8 +3044,8 @@ def JobQueueRename(old, new):
 
   getents = runtime.GetEnts()
 
-  utils.RenameFile(old, new, mkdir=True, mkdir_mode=0700,
-                   dir_uid=getents.masterd_uid, dir_gid=getents.masterd_gid)
+  utils.RenameFile(old, new, mkdir=True, mkdir_mode=0750,
+                   dir_uid=getents.masterd_uid, dir_gid=getents.daemons_gid)
 
 
 def BlockdevClose(instance_name, disks):
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 9855aedb15d7848a60351d26eebf48e72dcebe32..dc6e9079be1b92117b519ece9a9e98faf1753d9e 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -1529,7 +1529,7 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
     "INSTANCE_STATUS": status,
     "INSTANCE_MINMEM": minmem,
     "INSTANCE_MAXMEM": maxmem,
-    # TODO(2.7) remove deprecated "memory" value
+    # TODO(2.9) remove deprecated "memory" value
     "INSTANCE_MEMORY": maxmem,
     "INSTANCE_VCPUS": vcpus,
     "INSTANCE_DISK_TEMPLATE": disk_template,
@@ -8428,6 +8428,10 @@ class LUInstanceMove(LogicalUnit):
     assert self.instance is not None, \
       "Cannot retrieve locked instance %s" % self.op.instance_name
 
+    if instance.disk_template not in constants.DTS_COPYABLE:
+      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
+                                 instance.disk_template, errors.ECODE_STATE)
+
     node = self.cfg.GetNodeInfo(self.op.target_node)
     assert node is not None, \
       "Cannot retrieve locked node %s" % self.op.target_node
@@ -8503,12 +8507,9 @@ class LUInstanceMove(LogicalUnit):
     try:
       _CreateDisks(self, instance, target_node=target_node)
     except errors.OpExecError:
-      self.LogWarning("Device creation failed, reverting...")
-      try:
-        _RemoveDisks(self, instance, target_node=target_node)
-      finally:
-        self.cfg.ReleaseDRBDMinors(instance.name)
-        raise
+      self.LogWarning("Device creation failed")
+      self.cfg.ReleaseDRBDMinors(instance.name)
+      raise
 
     cluster_name = self.cfg.GetClusterInfo().cluster_name
 
@@ -9741,6 +9742,7 @@ def _CreateDisks(lu, instance, to_skip=None, target_node=None):
     result.Raise("Failed to create directory '%s' on"
                  " node %s" % (file_storage_dir, pnode))
 
+  disks_created = []
   # Note: this needs to be kept in sync with adding of disks in
   # LUInstanceSetParams
   for idx, device in enumerate(instance.disks):
@@ -9750,7 +9752,19 @@ def _CreateDisks(lu, instance, to_skip=None, target_node=None):
     #HARDCODE
     for node in all_nodes:
       f_create = node == pnode
-      _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
+      try:
+        _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
+        disks_created.append((node, device))
+      except errors.OpExecError:
+        logging.warning("Creating disk %s for instance '%s' failed",
+                        idx, instance.name)
+        for (node, disk) in disks_created:
+          lu.cfg.SetDiskID(disk, node)
+          result = lu.rpc.call_blockdev_remove(node, disk)
+          if result.fail_msg:
+            logging.warning("Failed to remove newly-created disk %s on node %s:"
+                            " %s", device, node, result.fail_msg)
+        raise
 
 
 def _RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
@@ -9758,8 +9772,7 @@ def _RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
 
   This abstracts away some work from `AddInstance()` and
   `RemoveInstance()`. Note that in case some of the devices couldn't
-  be removed, the removal will continue with the other ones (compare
-  with `_CreateDisks()`).
+  be removed, the removal will continue with the other ones.
 
   @type lu: L{LogicalUnit}
   @param lu: the logical unit on whose behalf we execute
@@ -11033,12 +11046,9 @@ class LUInstanceCreate(LogicalUnit):
       try:
         _CreateDisks(self, iobj)
       except errors.OpExecError:
-        self.LogWarning("Device creation failed, reverting...")
-        try:
-          _RemoveDisks(self, iobj)
-        finally:
-          self.cfg.ReleaseDRBDMinors(instance)
-          raise
+        self.LogWarning("Device creation failed")
+        self.cfg.ReleaseDRBDMinors(instance)
+        raise
 
     feedback_fn("adding instance %s to cluster config" % instance)
 
@@ -14100,6 +14110,7 @@ class LUInstanceSetParams(LogicalUnit):
     # update instance structure
     instance.disks = new_disks
     instance.disk_template = constants.DT_PLAIN
+    _UpdateIvNames(0, instance.disks)
     self.cfg.Update(instance, feedback_fn)
 
     # Release locks in case removing disks takes a while
diff --git a/lib/constants.py b/lib/constants.py
index c8427ddba6188556635c41c2d2073653052f23ca..7b254ca97f248a5face6059a1558223fe3eb7f3f 100644
--- a/lib/constants.py
+++ b/lib/constants.py
@@ -533,6 +533,14 @@ DTS_FILEBASED = compat.UniqueFrozenset([
   DT_SHARED_FILE,
   ])
 
+# the set of disk templates that can be moved by copying
+# Note: a requirement is that they're not accessed externally or shared between
+# nodes; in particular, sharedfile is not suitable.
+DTS_COPYABLE = compat.UniqueFrozenset([
+  DT_FILE,
+  DT_PLAIN,
+  ])
+
 # the set of disk templates that are supported by exclusive_storage
 DTS_EXCL_STORAGE = compat.UniqueFrozenset([DT_PLAIN])
 
@@ -1761,6 +1769,7 @@ NODE_EVAC_MODES = compat.UniqueFrozenset([
 # Job queue
 JOB_QUEUE_VERSION = 1
 JOB_QUEUE_SIZE_HARD_LIMIT = 5000
+JOB_QUEUE_FILES_PERMS = 0640
 
 JOB_ID_TEMPLATE = r"\d+"
 JOB_FILE_RE = re.compile(r"^job-(%s)$" % JOB_ID_TEMPLATE)
diff --git a/lib/jqueue.py b/lib/jqueue.py
index 9752f93064f47178c745756696607718df40af35..7ad2ea8ca0e36c95f2303bc4f38cd7ac50c616ec 100644
--- a/lib/jqueue.py
+++ b/lib/jqueue.py
@@ -1885,7 +1885,8 @@ class JobQueue(object):
     """
     getents = runtime.GetEnts()
     utils.WriteFile(file_name, data=data, uid=getents.masterd_uid,
-                    gid=getents.masterd_gid)
+                    gid=getents.daemons_gid,
+                    mode=constants.JOB_QUEUE_FILES_PERMS)
 
     if replicate:
       names, addrs = self._GetNodeIp()
diff --git a/lib/jstore.py b/lib/jstore.py
index f20da060408ae17ef77c568048105d71bf842d5f..324f91e1fcefcf343a4f4f2402b17a6861abc24a 100644
--- a/lib/jstore.py
+++ b/lib/jstore.py
@@ -111,7 +111,8 @@ def InitAndVerifyQueue(must_lock):
       if version is None:
         # Write new version file
         utils.WriteFile(pathutils.JOB_QUEUE_VERSION_FILE,
-                        uid=getents.masterd_uid, gid=getents.masterd_gid,
+                        uid=getents.masterd_uid, gid=getents.daemons_gid,
+                        mode=constants.JOB_QUEUE_FILES_PERMS,
                         data="%s\n" % constants.JOB_QUEUE_VERSION)
 
         # Read again
@@ -125,7 +126,8 @@ def InitAndVerifyQueue(must_lock):
       if serial is None:
         # Write new serial file
         utils.WriteFile(pathutils.JOB_QUEUE_SERIAL_FILE,
-                        uid=getents.masterd_uid, gid=getents.masterd_gid,
+                        uid=getents.masterd_uid, gid=getents.daemons_gid,
+                        mode=constants.JOB_QUEUE_FILES_PERMS,
                         data="%s\n" % 0)
 
         # Read again
@@ -174,7 +176,8 @@ def SetDrainFlag(drain_flag):
 
   if drain_flag:
     utils.WriteFile(pathutils.JOB_QUEUE_DRAIN_FILE, data="",
-                    uid=getents.masterd_uid, gid=getents.masterd_gid)
+                    uid=getents.masterd_uid, gid=getents.daemons_gid,
+                    mode=constants.JOB_QUEUE_FILES_PERMS)
   else:
     utils.RemoveFile(pathutils.JOB_QUEUE_DRAIN_FILE)
 
diff --git a/lib/masterd/instance.py b/lib/masterd/instance.py
index d99f4d87bb5e0d09e8d28b4b8e467cfab047ae9b..095401121518b68c1dad6b01199611a8b03d442d 100644
--- a/lib/masterd/instance.py
+++ b/lib/masterd/instance.py
@@ -1621,7 +1621,7 @@ def ComputeDiskSize(disk_template, disks):
   """
   # Required free disk space as a function of disk and swap space
   req_size_dict = {
-    constants.DT_DISKLESS: None,
+    constants.DT_DISKLESS: 0,
     constants.DT_PLAIN: sum(d[constants.IDISK_SIZE] for d in disks),
     # 128 MB are added for drbd metadata for each disk
     constants.DT_DRBD8:
diff --git a/lib/network.py b/lib/network.py
index 170a8b778935a8e3c298be971af4654047dd5de9..d78b717355a99e742d1496bd929309c395265cac 100644
--- a/lib/network.py
+++ b/lib/network.py
@@ -29,8 +29,20 @@ from bitarray import bitarray
 
 from ganeti import errors
 
+
+def _ComputeIpv4NumHosts(network_size):
+  """Derives the number of hosts in an IPv4 network from the size.
+
+  """
+  return 2 ** (32 - network_size)
+
+
 IPV4_NETWORK_MIN_SIZE = 30
-IPV4_NETWORK_MIN_NUM_HOSTS = 2 ** (32 - IPV4_NETWORK_MIN_SIZE)
+# FIXME: This limit is for performance reasons. Remove when refactoring
+# for performance tuning was successful.
+IPV4_NETWORK_MAX_SIZE = 16
+IPV4_NETWORK_MIN_NUM_HOSTS = _ComputeIpv4NumHosts(IPV4_NETWORK_MIN_SIZE)
+IPV4_NETWORK_MAX_NUM_HOSTS = _ComputeIpv4NumHosts(IPV4_NETWORK_MAX_SIZE)
 
 
 class AddressPool(object):
@@ -58,6 +70,13 @@ class AddressPool(object):
     self.net = network
 
     self.network = ipaddr.IPNetwork(self.net.network)
+    if self.network.numhosts > IPV4_NETWORK_MAX_NUM_HOSTS:
+      raise errors.AddressPoolError("A big network with %s host(s) is currently"
+                                    " not supported. Please specify at most a"
+                                    " /%s network" %
+                                    (str(self.network.numhosts),
+                                     IPV4_NETWORK_MAX_SIZE))
+
     if self.network.numhosts < IPV4_NETWORK_MIN_NUM_HOSTS:
       raise errors.AddressPoolError("A network with only %s host(s) is too"
                                     " small, please specify at least a /%s"
diff --git a/lib/objects.py b/lib/objects.py
index e9943857124bc360e4d750d1a18a755225ca293c..e5023ecea5a02fccbb3981cb408fb7698a2f8596 100644
--- a/lib/objects.py
+++ b/lib/objects.py
@@ -1652,7 +1652,7 @@ class Cluster(TaggableObject):
       wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
       if wrongkeys:
         # These keys would be silently removed by FillIPolicy()
-        msg = ("Cluster instance policy contains spourious keys: %s" %
+        msg = ("Cluster instance policy contains spurious keys: %s" %
                utils.CommaJoin(wrongkeys))
         raise errors.ConfigurationError(msg)
       self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
diff --git a/lib/tools/ensure_dirs.py b/lib/tools/ensure_dirs.py
index 85c32ddcc90cd8b79e9d96f99d4207ce60249fbb..d62414ae2670a69223f1ceba086484374056121b 100644
--- a/lib/tools/ensure_dirs.py
+++ b/lib/tools/ensure_dirs.py
@@ -159,19 +159,19 @@ def GetPaths():
                   getent.noded_uid, getent.noded_gid, False))
 
   paths.extend([
-    (pathutils.QUEUE_DIR, DIR, 0700, getent.masterd_uid, getent.masterd_gid),
-    (pathutils.QUEUE_DIR, QUEUE_DIR, 0600,
-     getent.masterd_uid, getent.masterd_gid),
+    (pathutils.QUEUE_DIR, DIR, 0750, getent.masterd_uid, getent.daemons_gid),
+    (pathutils.QUEUE_DIR, QUEUE_DIR, constants.JOB_QUEUE_FILES_PERMS,
+     getent.masterd_uid, getent.daemons_gid),
     (pathutils.JOB_QUEUE_DRAIN_FILE, FILE, 0644,
-     getent.masterd_uid, getent.masterd_gid, False),
-    (pathutils.JOB_QUEUE_LOCK_FILE, FILE, 0600,
-     getent.masterd_uid, getent.masterd_gid, False),
-    (pathutils.JOB_QUEUE_SERIAL_FILE, FILE, 0600,
-     getent.masterd_uid, getent.masterd_gid, False),
-    (pathutils.JOB_QUEUE_VERSION_FILE, FILE, 0600,
-     getent.masterd_uid, getent.masterd_gid, False),
-    (pathutils.JOB_QUEUE_ARCHIVE_DIR, DIR, 0700,
-     getent.masterd_uid, getent.masterd_gid),
+     getent.masterd_uid, getent.daemons_gid, False),
+    (pathutils.JOB_QUEUE_LOCK_FILE, FILE, constants.JOB_QUEUE_FILES_PERMS,
+     getent.masterd_uid, getent.daemons_gid, False),
+    (pathutils.JOB_QUEUE_SERIAL_FILE, FILE, constants.JOB_QUEUE_FILES_PERMS,
+     getent.masterd_uid, getent.daemons_gid, False),
+    (pathutils.JOB_QUEUE_VERSION_FILE, FILE, constants.JOB_QUEUE_FILES_PERMS,
+     getent.masterd_uid, getent.daemons_gid, False),
+    (pathutils.JOB_QUEUE_ARCHIVE_DIR, DIR, 0740,
+     getent.masterd_uid, getent.daemons_gid),
     (rapi_dir, DIR, 0750, getent.rapi_uid, getent.masterd_gid),
     (pathutils.RAPI_USERS_FILE, FILE, 0640,
      getent.rapi_uid, getent.masterd_gid, False),
@@ -246,7 +246,7 @@ def Main():
 
     if opts.full_run:
       RecursiveEnsure(pathutils.JOB_QUEUE_ARCHIVE_DIR, getent.masterd_uid,
-                      getent.masterd_gid, 0700, 0600)
+                      getent.daemons_gid, 0750, constants.JOB_QUEUE_FILES_PERMS)
   except errors.GenericError, err:
     logging.error("An error occurred while setting permissions: %s", err)
     return constants.EXIT_FAILURE
diff --git a/src/Ganeti/HTools/Backend/IAlloc.hs b/src/Ganeti/HTools/Backend/IAlloc.hs
index d1d1436119b8f4c15af79908f394bd6c09731ca7..65cbf3d5f25f70cff3798985195a48776ce14759 100644
--- a/src/Ganeti/HTools/Backend/IAlloc.hs
+++ b/src/Ganeti/HTools/Backend/IAlloc.hs
@@ -65,14 +65,17 @@ parseBaseInstance :: String
                   -> JSRecord
                   -> Result (String, Instance.Instance)
 parseBaseInstance n a = do
-  let extract x = tryFromObj ("invalid data for instance '" ++ n ++ "'") a x
+  let errorMessage = "invalid data for instance '" ++ n ++ "'"
+  let extract x = tryFromObj errorMessage a x
   disk  <- extract "disk_space_total"
+  disks <- extract "disks" >>= toArray >>= asObjectList >>=
+           mapM (flip (tryFromObj errorMessage) "size" . fromJSObject)
   mem   <- extract "memory"
   vcpus <- extract "vcpus"
   tags  <- extract "tags"
   dt    <- extract "disk_template"
   su    <- extract "spindle_use"
-  return (n, Instance.create n mem disk vcpus Running tags True 0 0 dt su)
+  return (n, Instance.create n mem disk disks vcpus Running tags True 0 0 dt su)
 
 -- | Parses an instance as found in the cluster instance list.
 parseInstance :: NameAssoc -- ^ The node name-to-index association list
diff --git a/src/Ganeti/HTools/Backend/Luxi.hs b/src/Ganeti/HTools/Backend/Luxi.hs
index febb0ab434832b4f37a00b60b532b4729383bf84..eca265e063b0e5852d4df009b48f46c676195802 100644
--- a/src/Ganeti/HTools/Backend/Luxi.hs
+++ b/src/Ganeti/HTools/Backend/Luxi.hs
@@ -172,7 +172,7 @@ parseInstance ktn [ name, disk, mem, vcpus
   xauto_balance <- convert "auto_balance" auto_balance
   xdt <- convert "disk_template" disk_template
   xsu <- convert "be/spindle_use" su
-  let inst = Instance.create xname xmem xdisk xvcpus
+  let inst = Instance.create xname xmem xdisk [xdisk] xvcpus
              xrunning xtags xauto_balance xpnode snode xdt xsu
   return (xname, inst)
 
diff --git a/src/Ganeti/HTools/Backend/Rapi.hs b/src/Ganeti/HTools/Backend/Rapi.hs
index 5a1cd49349babd5cd6bfc84a394e2a53dd00ab18..4654c8825e683f61e82902876e8dffdfe4148e5f 100644
--- a/src/Ganeti/HTools/Backend/Rapi.hs
+++ b/src/Ganeti/HTools/Backend/Rapi.hs
@@ -122,6 +122,7 @@ parseInstance ktn a = do
   let owner_name = "Instance '" ++ name ++ "', error while parsing data"
   let extract s x = tryFromObj owner_name x s
   disk <- extract "disk_usage" a
+  disks <- extract "disk.sizes" a
   beparams <- liftM fromJSObject (extract "beparams" a)
   omem <- extract "oper_ram" a
   mem <- case omem of
@@ -138,7 +139,7 @@ parseInstance ktn a = do
   auto_balance <- extract "auto_balance" beparams
   dt <- extract "disk_template" a
   su <- extract "spindle_use" beparams
-  let inst = Instance.create name mem disk vcpus running tags
+  let inst = Instance.create name mem disk disks vcpus running tags
              auto_balance pnode snode dt su
   return (name, inst)
 
diff --git a/src/Ganeti/HTools/Backend/Text.hs b/src/Ganeti/HTools/Backend/Text.hs
index 2ca928f3754767bc81fac07c158589a87972c124..755d8bebf14988eab7834e09d7271e758851bc46 100644
--- a/src/Ganeti/HTools/Backend/Text.hs
+++ b/src/Ganeti/HTools/Backend/Text.hs
@@ -231,7 +231,7 @@ loadInst ktn [ name, mem, dsk, vcpus, status, auto_bal, pnode, snode
   when (sidx == pidx) . fail $ "Instance " ++ name ++
            " has same primary and secondary node - " ++ pnode
   let vtags = commaSplit tags
-      newinst = Instance.create name vmem vdsk vvcpus vstatus vtags
+      newinst = Instance.create name vmem vdsk [vdsk] vvcpus vstatus vtags
                 auto_balance pidx sidx disk_template spindle_use
   return (name, newinst)
 
diff --git a/src/Ganeti/HTools/Instance.hs b/src/Ganeti/HTools/Instance.hs
index d227cf174a402b2e48e4d120fa18a4263824560a..8de100979ce93743e17145500ea3655f3169caae 100644
--- a/src/Ganeti/HTools/Instance.hs
+++ b/src/Ganeti/HTools/Instance.hs
@@ -70,7 +70,8 @@ data Instance = Instance
   { name         :: String    -- ^ The instance name
   , alias        :: String    -- ^ The shortened name
   , mem          :: Int       -- ^ Memory of the instance
-  , dsk          :: Int       -- ^ Disk size of instance
+  , dsk          :: Int       -- ^ Total disk usage of the instance
+  , disks        :: [Int]     -- ^ Sizes of the individual disks
   , vcpus        :: Int       -- ^ Number of VCPUs
   , runSt        :: T.InstanceStatus -- ^ Original run status
   , pNode        :: T.Ndx     -- ^ Original primary node
@@ -162,15 +163,16 @@ type List = Container.Container Instance
 --
 -- Some parameters are not initialized by function, and must be set
 -- later (via 'setIdx' for example).
-create :: String -> Int -> Int -> Int -> T.InstanceStatus
+create :: String -> Int -> Int -> [Int] -> Int -> T.InstanceStatus
        -> [String] -> Bool -> T.Ndx -> T.Ndx -> T.DiskTemplate -> Int
        -> Instance
-create name_init mem_init dsk_init vcpus_init run_init tags_init
+create name_init mem_init dsk_init disks_init vcpus_init run_init tags_init
        auto_balance_init pn sn dt su =
   Instance { name = name_init
            , alias = name_init
            , mem = mem_init
            , dsk = dsk_init
+           , disks = disks_init
            , vcpus = vcpus_init
            , runSt = run_init
            , pNode = pn
@@ -265,7 +267,7 @@ specOf Instance { mem = m, dsk = d, vcpus = c } =
 instBelowISpec :: Instance -> T.ISpec -> T.OpResult ()
 instBelowISpec inst ispec
   | mem inst > T.iSpecMemorySize ispec = Bad T.FailMem
-  | dsk inst > T.iSpecDiskSize ispec   = Bad T.FailDisk
+  | any (> T.iSpecDiskSize ispec) (disks inst) = Bad T.FailDisk
   | vcpus inst > T.iSpecCpuCount ispec = Bad T.FailCPU
   | otherwise = Ok ()
 
@@ -273,7 +275,7 @@ instBelowISpec inst ispec
 instAboveISpec :: Instance -> T.ISpec -> T.OpResult ()
 instAboveISpec inst ispec
   | mem inst < T.iSpecMemorySize ispec = Bad T.FailMem
-  | dsk inst < T.iSpecDiskSize ispec   = Bad T.FailDisk
+  | any (< T.iSpecDiskSize ispec) (disks inst) = Bad T.FailDisk
   | vcpus inst < T.iSpecCpuCount ispec = Bad T.FailCPU
   | otherwise = Ok ()
 
diff --git a/src/Ganeti/HTools/Program/Hspace.hs b/src/Ganeti/HTools/Program/Hspace.hs
index 73d39a485a7bc677cf988523425fc730dba78f32..f4b0836d921f00208fd76873fa98553873e540cb 100644
--- a/src/Ganeti/HTools/Program/Hspace.hs
+++ b/src/Ganeti/HTools/Program/Hspace.hs
@@ -390,9 +390,12 @@ runAllocation cdata stop_allocation actual_result spec dt mode opts = do
   return (sortReasons reasons, new_nl, length new_ixes, tieredSpecMap new_ixes)
 
 -- | Create an instance from a given spec.
+-- For values not implied by the resource specification (like distribution
+-- of the disk space to individual disks), sensible defaults are guessed (e.g.,
+-- having a single disk).
 instFromSpec :: RSpec -> DiskTemplate -> Int -> Instance.Instance
 instFromSpec spx =
-  Instance.create "new" (rspecMem spx) (rspecDsk spx)
+  Instance.create "new" (rspecMem spx) (rspecDsk spx) [rspecDsk spx]
     (rspecCpu spx) Running [] True (-1) (-1)
 
 -- | Main function.
diff --git a/src/Ganeti/JQueue.hs b/src/Ganeti/JQueue.hs
index 39aa3fc604009f9354af9a726f8422fe428d3976..828e6625168baacadd8604d737068c6fdda79973 100644
--- a/src/Ganeti/JQueue.hs
+++ b/src/Ganeti/JQueue.hs
@@ -230,25 +230,44 @@ determineJobDirectories rootdir archived = do
              else return []
   return $ rootdir:other
 
+-- Function equivalent to the \'sequence\' function, which cannot be used
+-- because of a library version conflict on Lucid.
+-- FIXME: delete this and just use \'sequence\' instead when Lucid compatibility
+-- will not be required anymore.
+sequencer :: [Either IOError [JobId]] -> Either IOError [[JobId]]
+sequencer l = fmap reverse $ foldl seqFolder (Right []) l
+
+-- | Folding function for joining multiple [JobIds] into one list.
+seqFolder :: Either IOError [[JobId]]
+          -> Either IOError [JobId]
+          -> Either IOError [[JobId]]
+seqFolder (Left e) _ = Left e
+seqFolder (Right _) (Left e) = Left e
+seqFolder (Right l) (Right el) = Right $ el:l
+
 -- | Computes the list of all jobs in the given directories.
-getJobIDs :: [FilePath] -> IO [JobId]
-getJobIDs = liftM concat . mapM getDirJobIDs
+getJobIDs :: [FilePath] -> IO (Either IOError [JobId])
+getJobIDs paths = liftM (fmap concat . sequencer) (mapM getDirJobIDs paths)
 
 -- | Sorts the a list of job IDs.
 sortJobIDs :: [JobId] -> [JobId]
 sortJobIDs = sortBy (comparing fromJobId)
 
 -- | Computes the list of jobs in a given directory.
-getDirJobIDs :: FilePath -> IO [JobId]
+getDirJobIDs :: FilePath -> IO (Either IOError [JobId])
 getDirJobIDs path = do
-  contents <- getDirectoryContents path `Control.Exception.catch`
-                ignoreIOError [] False
-                  ("Failed to list job directory " ++ path)
-  let jids = foldl (\ids file ->
-                      case parseJobFileId file of
-                        Nothing -> ids
-                        Just new_id -> new_id:ids) [] contents
-  return $ reverse jids
+  either_contents <-
+    try (getDirectoryContents path) :: IO (Either IOError [FilePath])
+  case either_contents of
+    Left e -> do
+      logWarning $ "Failed to list job directory " ++ path ++ ": " ++ show e
+      return $ Left e
+    Right contents -> do
+      let jids = foldl (\ids file ->
+                         case parseJobFileId file of
+                           Nothing -> ids
+                           Just new_id -> new_id:ids) [] contents
+      return . Right $ reverse jids
 
 -- | Reads the job data from disk.
 readJobDataFromDisk :: FilePath -> Bool -> JobId -> IO (Maybe (String, Bool))
diff --git a/src/Ganeti/OpCodes.hs b/src/Ganeti/OpCodes.hs
index 8b4cda6664858ec44a52479b1006d811da1c5cbc..cf34b22222c67d79693ab1b6499abeb0303bb016 100644
--- a/src/Ganeti/OpCodes.hs
+++ b/src/Ganeti/OpCodes.hs
@@ -48,7 +48,7 @@ module Ganeti.OpCodes
   ) where
 
 import Data.Maybe (fromMaybe)
-import Text.JSON (readJSON, showJSON, JSON, JSValue, makeObj)
+import Text.JSON (readJSON, JSON, JSValue, makeObj)
 import qualified Text.JSON
 
 import Ganeti.THH
diff --git a/src/Ganeti/Query/Query.hs b/src/Ganeti/Query/Query.hs
index a3ea143d50b3045ac6d3235cb35c1b8b0f3b0180..bce1f3f522850758737fdbe015e4cbf7b43fa8df 100644
--- a/src/Ganeti/Query/Query.hs
+++ b/src/Ganeti/Query/Query.hs
@@ -53,7 +53,7 @@ module Ganeti.Query.Query
     ) where
 
 import Control.DeepSeq
-import Control.Monad (filterM, liftM, foldM)
+import Control.Monad (filterM, foldM)
 import Control.Monad.Trans (lift)
 import Data.List (intercalate)
 import Data.Maybe (fromMaybe)
@@ -238,9 +238,14 @@ queryJobs cfg live fields qfilter =
              Bad msg -> resultT . Bad $ GenericError msg
              Ok [] -> if live
                         -- we can check the filesystem for actual jobs
-                        then lift $ liftM sortJobIDs
-                             (determineJobDirectories rootdir want_arch >>=
-                              getJobIDs)
+                        then do
+                          maybeJobIDs <-
+                            lift (determineJobDirectories rootdir want_arch
+                              >>= getJobIDs)
+                          case maybeJobIDs of
+                            Left e -> (resultT . Bad) . BlockDeviceError $
+                              "Unable to fetch the job list: " ++ show e
+                            Right jobIDs -> resultT . Ok $ sortJobIDs jobIDs
                         -- else we shouldn't look at the filesystem...
                         else return []
              Ok v -> resultT $ Ok v
diff --git a/src/Ganeti/Query/Server.hs b/src/Ganeti/Query/Server.hs
index 3ab49b05915adfbe26af75d480e4007952ed27cf..dc6d0cfb4ba83827f63c17bac9e41040b662dd3f 100644
--- a/src/Ganeti/Query/Server.hs
+++ b/src/Ganeti/Query/Server.hs
@@ -198,7 +198,7 @@ handleClientMsg client creader args = do
   return True
 
 -- | Handles one iteration of the client protocol: receives message,
--- checks for validity and decods, returns response.
+-- checks it for validity and decodes it, returns response.
 handleClient :: Client -> ConfigReader -> IO Bool
 handleClient client creader = do
   !msg <- recvMsgExt client
diff --git a/test/data/htools/hail-alloc-invalid-twodisks.json b/test/data/htools/hail-alloc-invalid-twodisks.json
new file mode 100644
index 0000000000000000000000000000000000000000..355825666069fe21b5af600f1a8e6629dccf3024
--- /dev/null
+++ b/test/data/htools/hail-alloc-invalid-twodisks.json
@@ -0,0 +1,86 @@
+{
+  "cluster_tags": [],
+  "instances": {},
+  "ipolicy": {
+    "max": {
+      "disk-size": 2048
+    },
+    "min": {
+      "disk-size": 1024
+    }
+  },
+  "nodegroups": {
+    "uuid-group-1": {
+      "alloc_policy": "preferred",
+      "ipolicy": {
+        "disk-templates": [
+          "file"
+        ],
+        "max": {
+          "cpu-count": 2,
+          "disk-count": 8,
+          "disk-size": 2048,
+          "memory-size": 12800,
+          "nic-count": 8,
+          "spindle-use": 8
+        },
+        "min": {
+          "cpu-count": 1,
+          "disk-count": 1,
+          "disk-size": 1024,
+          "memory-size": 128,
+          "nic-count": 1,
+          "spindle-use": 1
+        },
+        "spindle-ratio": 32.0,
+        "std": {
+          "cpu-count": 1,
+          "disk-count": 1,
+          "disk-size": 1024,
+          "memory-size": 128,
+          "nic-count": 1,
+          "spindle-use": 1
+        },
+        "vcpu-ratio": 4.0
+      },
+      "name": "default",
+      "tags": []
+    }
+  },
+  "nodes": {
+    "node1": {
+      "drained": false,
+      "free_disk": 1377280,
+      "free_memory": 31389,
+      "group": "uuid-group-1",
+      "ndparams": {
+        "spindle_count": 1
+      },
+      "offline": false,
+      "reserved_memory": 1017,
+      "total_cpus": 4,
+      "total_disk": 1377280,
+      "total_memory": 32763
+    }
+  },
+  "request": {
+    "disk_space_total": 1536,
+    "disk_template": "file",
+    "disks": [
+      {
+        "size": 768
+      },
+      {
+        "size": 768
+      }
+    ],
+    "memory": 1024,
+    "name": "instance1",
+    "required_nodes": 1,
+    "spindle_use": 2,
+    "tags": [],
+    "type": "allocate",
+    "vcpus": 1
+  },
+  "version": 2
+}
diff --git a/test/data/htools/hail-alloc-twodisks.json b/test/data/htools/hail-alloc-twodisks.json
new file mode 100644
index 0000000000000000000000000000000000000000..abf7221415440ce5c9dde7833e8d9ff67dc5367d
--- /dev/null
+++ b/test/data/htools/hail-alloc-twodisks.json
@@ -0,0 +1,86 @@
+{
+  "cluster_tags": [],
+  "instances": {},
+  "ipolicy": {
+    "max": {
+      "disk-size": 2048
+    },
+    "min": {
+      "disk-size": 1024
+    }
+  },
+  "nodegroups": {
+    "uuid-group-1": {
+      "alloc_policy": "preferred",
+      "ipolicy": {
+        "disk-templates": [
+          "file"
+        ],
+        "max": {
+          "cpu-count": 2,
+          "disk-count": 8,
+          "disk-size": 2048,
+          "memory-size": 12800,
+          "nic-count": 8,
+          "spindle-use": 8
+        },
+        "min": {
+          "cpu-count": 1,
+          "disk-count": 1,
+          "disk-size": 1024,
+          "memory-size": 128,
+          "nic-count": 1,
+          "spindle-use": 1
+        },
+        "spindle-ratio": 32.0,
+        "std": {
+          "cpu-count": 1,
+          "disk-count": 1,
+          "disk-size": 1024,
+          "memory-size": 128,
+          "nic-count": 1,
+          "spindle-use": 1
+        },
+        "vcpu-ratio": 4.0
+      },
+      "name": "default",
+      "tags": []
+    }
+  },
+  "nodes": {
+    "node1": {
+      "drained": false,
+      "free_disk": 1377280,
+      "free_memory": 31389,
+      "group": "uuid-group-1",
+      "ndparams": {
+        "spindle_count": 1
+      },
+      "offline": false,
+      "reserved_memory": 1017,
+      "total_cpus": 4,
+      "total_disk": 1377280,
+      "total_memory": 32763
+    }
+  },
+  "request": {
+    "disk_space_total": 3072,
+    "disk_template": "file",
+    "disks": [
+      {
+        "size": 1536
+      },
+      {
+        "size": 1536
+      }
+    ],
+    "memory": 1024,
+    "name": "instance1",
+    "required_nodes": 1,
+    "spindle_use": 2,
+    "tags": [],
+    "type": "allocate",
+    "vcpus": 1
+  },
+  "version": 2
+}
diff --git a/test/hs/Test/Ganeti/HTools/Instance.hs b/test/hs/Test/Ganeti/HTools/Instance.hs
index 0f71c2672f58d1bf2e98948815a17ba44855316e..ca8f682fb3fec544f7effd7f95eaee13b5f2b6a5 100644
--- a/test/hs/Test/Ganeti/HTools/Instance.hs
+++ b/test/hs/Test/Ganeti/HTools/Instance.hs
@@ -62,7 +62,7 @@ genInstanceSmallerThan lim_mem lim_dsk lim_cpu = do
   sn <- arbitrary
   vcpus <- choose (0, lim_cpu)
   dt <- arbitrary
-  return $ Instance.create name mem dsk vcpus run_st [] True pn sn dt 1
+  return $ Instance.create name mem dsk [dsk] vcpus run_st [] True pn sn dt 1
 
 -- | Generates an instance smaller than a node.
 genInstanceSmallerThanNode :: Node.Node -> Gen Instance.Instance
diff --git a/test/hs/Test/Ganeti/JQueue.hs b/test/hs/Test/Ganeti/JQueue.hs
index d2d946fec743c2c3e94bafdeb9607b95b1455ff6..e23733644154bd6f2a0fb5afc71398cf67ad93dd 100644
--- a/test/hs/Test/Ganeti/JQueue.hs
+++ b/test/hs/Test/Ganeti/JQueue.hs
@@ -176,18 +176,27 @@ case_JobStatusPri_py_equiv = do
 -- | Tests listing of Job ids.
 prop_ListJobIDs :: Property
 prop_ListJobIDs = monadicIO $ do
+  let extractJobIDs jIDs = do
+        either_jobs <- jIDs
+        case either_jobs of
+          Right j -> return j
+          Left e -> fail $ show e
+      isLeft e =
+        case e of
+          Left _ -> True
+          _ -> False
   jobs <- pick $ resize 10 (listOf1 genJobId `suchThat` (\l -> l == nub l))
   (e, f, g) <-
     run . withSystemTempDirectory "jqueue-test." $ \tempdir -> do
-    empty_dir <- getJobIDs [tempdir]
+    empty_dir <- extractJobIDs $ getJobIDs [tempdir]
     mapM_ (\jid -> writeFile (tempdir </> jobFileName jid) "") jobs
-    full_dir <- getJobIDs [tempdir]
+    full_dir <- extractJobIDs $ getJobIDs [tempdir]
     invalid_dir <- getJobIDs [tempdir </> "no-such-dir"]
     return (empty_dir, sortJobIDs full_dir, invalid_dir)
   stop $ conjoin [ printTestCase "empty directory" $ e ==? []
                  , printTestCase "directory with valid names" $
                    f ==? sortJobIDs jobs
-                 , printTestCase "invalid directory" $ g ==? []
+                 , printTestCase "invalid directory" $ isLeft g
                  ]
 
 -- | Tests loading jobs from disk.
diff --git a/test/hs/Test/Ganeti/Runtime.hs b/test/hs/Test/Ganeti/Runtime.hs
index bf49c4b6b587b16f735cb4456e5a389cc136042d..faa2ac6d5fab1c450d5987d2c2e1a0aa8305dfa8 100644
--- a/test/hs/Test/Ganeti/Runtime.hs
+++ b/test/hs/Test/Ganeti/Runtime.hs
@@ -79,7 +79,7 @@ case_UsersGroups = do
     runPython "from ganeti import constants\n\
               \from ganeti import serializer\n\
               \import sys\n\
-              \users = [constants.MASTERD_GROUP,\n\
+              \users = [constants.MASTERD_USER,\n\
               \         constants.NODED_USER,\n\
               \         constants.RAPI_USER,\n\
               \         constants.CONFD_USER,\n\
diff --git a/test/hs/Test/Ganeti/TestHTools.hs b/test/hs/Test/Ganeti/TestHTools.hs
index 8758509547d16914c703a25920897470170ef11c..3b0ac6a682b7a42f89aa10a8292f66a2a828c75f 100644
--- a/test/hs/Test/Ganeti/TestHTools.hs
+++ b/test/hs/Test/Ganeti/TestHTools.hs
@@ -99,8 +99,8 @@ defGroupAssoc = Map.singleton (Group.uuid defGroup) (Group.idx defGroup)
 -- | Create an instance given its spec.
 createInstance :: Int -> Int -> Int -> Instance.Instance
 createInstance mem dsk vcpus =
-  Instance.create "inst-unnamed" mem dsk vcpus Types.Running [] True (-1) (-1)
-    Types.DTDrbd8 1
+  Instance.create "inst-unnamed" mem dsk [dsk] vcpus Types.Running [] True (-1)
+    (-1) Types.DTDrbd8 1
 
 -- | Create a small cluster by repeating a node spec.
 makeSmallCluster :: Node.Node -> Int -> Node.List
diff --git a/test/hs/shelltests/htools-hail.test b/test/hs/shelltests/htools-hail.test
index 55dd3ca20a114ab28534406b7047b9c480dc7ca8..4da03d89a0270f9b71410c5acd40719a5751cbcd 100644
--- a/test/hs/shelltests/htools-hail.test
+++ b/test/hs/shelltests/htools-hail.test
@@ -38,6 +38,15 @@ echo '{"request": 0}' | ./test/hs/hail -
 >>> /"success":true,"info":"Request successful: 0 instances failed to move and 1 were moved successfully"/
 >>>= 0
 
+# check that hail correctly applies the disk policy on a per-disk basis
+./test/hs/hail $TESTDATA_DIR/hail-alloc-twodisks.json
+>>> /"success":true,.*,"result":\["node1"\]/
+>>>= 0
+
+./test/hs/hail $TESTDATA_DIR/hail-alloc-invalid-twodisks.json
+>>> /"success":false,.*FailDisk: 1/
+>>>= 0
+
 # check that hail can use the simu backend
 ./test/hs/hail --simu p,8,8T,16g,16 $TESTDATA_DIR/hail-alloc-drbd.json
 >>> /"success":true,/
diff --git a/tools/users-setup.in b/tools/users-setup.in
new file mode 100644
index 0000000000000000000000000000000000000000..8be0a0125e58f1a031fedb8cde4afcb647fe9467
--- /dev/null
+++ b/tools/users-setup.in
@@ -0,0 +1,26 @@
+#!/bin/sh
+#Create common groups
+addgroup --system @GNTDAEMONSGROUP@
+addgroup --system @GNTADMINGROUP@
+
+#Create user-specific groups
+addgroup --system @GNTMASTERDGROUP@
+addgroup --system @GNTRAPIGROUP@
+addgroup --system @GNTCONFDGROUP@
+
+#Create users
+adduser --system --ingroup @GNTMASTERDGROUP@ @GNTMASTERUSER@
+adduser --system --ingroup @GNTRAPIGROUP@ @GNTRAPIUSER@
+adduser --system --ingroup @GNTCONFDGROUP@ @GNTCONFDUSER@
+adduser --system @GNTNODEDUSER@
+
+#Add users to extra groups
+for u in @GNTMASTERUSER@ @GNTCONFDUSER@ @GNTRAPIUSER@
+do
+  adduser $u @GNTDAEMONSGROUP@
+done
+
+for u in @GNTMASTERUSER@ @GNTRAPIUSER@
+do
+  adduser $u @GNTADMINGROUP@
+done