diff --git a/Makefile.am b/Makefile.am
index df4fa565de0865d33f1cfe28475b03e5746c28f0..78cefa88a15bf7559ac307891c516648f741f7ac 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -254,6 +254,8 @@ EXTRA_DIST = \
 	doc/examples/ganeti.cron.in \
 	doc/examples/gnt-config-backup.in \
 	doc/examples/dumb-allocator \
+	doc/examples/ganeti.default \
+	doc/examples/ganeti.default-debug \
 	doc/examples/hooks/ethers \
 	doc/examples/hooks/ipsec.in \
 	test/testutils.py \
@@ -353,9 +355,15 @@ srclink_files = \
 	$(all_python_code)
 
 check_python_code = \
-	autotools/build-bash-completion \
+	$(BUILD_BASH_COMPLETION) \
 	$(all_python_code)
 
+lint_python_code = \
+	ganeti \
+	$(dist_sbin_SCRIPTS) \
+	$(dist_tools_SCRIPTS) \
+	$(BUILD_BASH_COMPLETION)
+
 devel/upload: devel/upload.in $(REPLACE_VARS_SED)
 	sed -f $(REPLACE_VARS_SED) < $< > $@
 	chmod u+x $@
@@ -477,7 +485,8 @@ check-local:
 
 .PHONY: lint
 lint: ganeti
-	pylint ganeti $(dist_sbin_SCRIPTS) $(dist_tools_SCRIPTS)
+	@test -n "$(PYLINT)" || { echo 'pylint' not found during configure; exit 1; }
+	$(PYLINT) $(LINT_OPTS) $(lint_python_code)
 
 # a dist hook rule for catching revision control directories
 distcheck-hook:
diff --git a/autotools/build-bash-completion b/autotools/build-bash-completion
index aa130d42f289c7669ee60ffe3e4a82b9d3497feb..4a6f66c1dc0128f0cff869859294daed64ea05fd 100755
--- a/autotools/build-bash-completion
+++ b/autotools/build-bash-completion
@@ -19,9 +19,14 @@
 # 02110-1301, USA.
 
 
-import optparse
+"""Script to generate bash_completion script for Ganeti.
+
+"""
+
+# pylint: disable-msg=C0103
+# [C0103] Invalid name build-bash-completion
+
 import os
-import sys
 import re
 from cStringIO import StringIO
 
@@ -263,6 +268,8 @@ class CompletionWriter:
     self.args = args
 
     for opt in opts:
+      # While documented, these variables aren't seen as public attributes by
+      # pylint. pylint: disable-msg=W0212
       opt.all_names = sorted(opt._short_opts + opt._long_opts)
 
   def _FindFirstArgument(self, sw):
@@ -591,7 +598,7 @@ def GetCommands(filename, module):
   """
   try:
     commands = getattr(module, "commands")
-  except AttributeError, err:
+  except AttributeError:
     raise Exception("Script %s doesn't have 'commands' attribute" %
                     filename)
 
diff --git a/autotools/check-python-code b/autotools/check-python-code
index dcacbf1327964d563f621f08b4f627c5dee21212..4f14ccb0cf50e0c409b23317a0ae30b1fc535c42 100755
--- a/autotools/check-python-code
+++ b/autotools/check-python-code
@@ -18,7 +18,11 @@
 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 # 02110-1301, USA.
 
-let problems=0
+set -e
+
+# "[...] If the last ARG evaluates to 0, let returns 1; 0 is returned
+# otherwise.", hence ignoring the return value.
+let problems=0 || :
 
 for script; do
   if grep -n -H -F $'\t' "$script"; then
diff --git a/configure.ac b/configure.ac
index 2a2ac0e0d689ed06cfe16fb7b945f736a39b9b05..441bd3a0d7244090295470444999ab8959f765a0 100644
--- a/configure.ac
+++ b/configure.ac
@@ -2,7 +2,7 @@
 m4_define([gnt_version_major], [2])
 m4_define([gnt_version_minor], [1])
 m4_define([gnt_version_revision], [0])
-m4_define([gnt_version_suffix], [~rc2])
+m4_define([gnt_version_suffix], [~rc4])
 m4_define([gnt_version_full],
           m4_format([%d.%d.%d%s],
                     gnt_version_major, gnt_version_minor,
@@ -153,6 +153,14 @@ then
   AC_MSG_WARN([dot (from the graphviz suite) not found, documentation rebuild not possible])
 fi
 
+# Check for pylint
+AC_ARG_VAR(PYLINT, [pylint path])
+AC_PATH_PROG(PYLINT, [pylint], [])
+if test -z "$PYLINT"
+then
+  AC_MSG_WARN([pylint not found, checking code will not be possible])
+fi
+
 # Check for socat
 AC_ARG_VAR(SOCAT, [socat path])
 AC_PATH_PROG(SOCAT, [socat], [])
diff --git a/daemons/daemon-util.in b/daemons/daemon-util.in
index ab58ac5d63f9c134a3c5f8f6b9918d2bc85b1cbc..1e928ebdd6cd14ab70ed615286b877f37916580e 100755
--- a/daemons/daemon-util.in
+++ b/daemons/daemon-util.in
@@ -62,7 +62,7 @@ start() {
   local ucname=$(tr a-z A-Z <<< ${name#ganeti-})
 
   # Read $<daemon>_ARGS and $EXTRA_<daemon>_ARGS
-  eval local args="\$${ucname}_ARGS \$EXTRA_${ucname}_ARGS"
+  eval local args="\"\$${ucname}_ARGS \$EXTRA_${ucname}_ARGS\""
 
   start-stop-daemon --start --quiet --oknodo \
     --pidfile $(_daemon_pidfile $name) \
diff --git a/daemons/ganeti-masterd b/daemons/ganeti-masterd
index b13bb93f29ecf335042df520932b212fad3a7d91..ddcef6676e7130494be40b8718caa44748afe7be 100755
--- a/daemons/ganeti-masterd
+++ b/daemons/ganeti-masterd
@@ -100,7 +100,8 @@ class IOServer(SocketServer.UnixStreamServer):
 
   def setup_queue(self):
     self.context = GanetiContext()
-    self.request_workers = workerpool.WorkerPool(CLIENT_REQUEST_WORKERS,
+    self.request_workers = workerpool.WorkerPool("ClientReq",
+                                                 CLIENT_REQUEST_WORKERS,
                                                  ClientRequestWorker)
 
   def process_request(self, request, client_address):
@@ -280,6 +281,12 @@ class ClientOps:
       op = opcodes.OpQueryClusterInfo()
       return self._Query(op)
 
+    elif method == luxi.REQ_QUERY_TAGS:
+      kind, name = args
+      logging.info("Received tags query request")
+      op = opcodes.OpGetTags(kind=kind, name=name)
+      return self._Query(op)
+
     elif method == luxi.REQ_QUEUE_SET_DRAIN_FLAG:
       drain_flag = args
       logging.info("Received queue drain flag change request to %s",
diff --git a/devel/release b/devel/release
index ba1c12867c89cda5a99b87f151ac6531d9008bc3..343781738d3d15696b772c76ce1784765e44a1d9 100755
--- a/devel/release
+++ b/devel/release
@@ -32,18 +32,35 @@ set -e
 : ${URL:=git://git.ganeti.org/ganeti.git}
 TAG="$1"
 
-TMPDIR=`mktemp -d`
+if [[ -z "$TAG" ]]; then
+  echo "Usage: $0 <tree-ish>" >&2
+  exit 1
+fi
+
+echo "Using Git repository $URL"
+
+TMPDIR=$(mktemp -d -t gntrelease.XXXXXXXXXX)
 cd $TMPDIR
+
 echo "Cloning the repository under $TMPDIR ..."
 git clone -q "$URL" dist
 cd dist
 git checkout $TAG
 ./autogen.sh
 ./configure
+
 VERSION=$(sed -n -e '/^PACKAGE_VERSION =/ s/^PACKAGE_VERSION = // p' Makefile)
+
 make distcheck
 fakeroot make dist
 tar tzvf ganeti-$VERSION.tar.gz
+
+echo
+echo 'MD5:'
+md5sum ganeti-$VERSION.tar.gz
+echo
+echo 'SHA1:'
 sha1sum ganeti-$VERSION.tar.gz
+echo
 echo "The archive is at $PWD/ganeti-$VERSION.tar.gz"
 echo "Please copy it and remove the temporary directory when done."
diff --git a/devel/upload.in b/devel/upload.in
index 8f31c3bcb0d8491966a1602ad154f872d0f5e824..f2325cd8905fe6239ea243539523d15071e6ef1f 100644
--- a/devel/upload.in
+++ b/devel/upload.in
@@ -78,6 +78,10 @@ make $make_args install DESTDIR="$TXD"
 install -D --mode=0755 doc/examples/ganeti.initd \
   "$TXD/$SYSCONFDIR/init.d/ganeti"
 
+[ -f doc/examples/ganeti.default-debug ] && \
+install -D --mode=0644 doc/examples/ganeti.default-debug \
+  "$TXD/$SYSCONFDIR/default/ganeti"
+
 [ -f doc/examples/bash_completion ] && \
 install -D --mode=0644 doc/examples/bash_completion \
   "$TXD/$SYSCONFDIR/bash_completion.d/ganeti"
diff --git a/doc/examples/ganeti.default b/doc/examples/ganeti.default
new file mode 100644
index 0000000000000000000000000000000000000000..7f7414e44d7edf851716e0ae725d6c2b8b878fa9
--- /dev/null
+++ b/doc/examples/ganeti.default
@@ -0,0 +1,5 @@
+# Default arguments for Ganeti daemons
+NODED_ARGS=""
+MASTERD_ARGS=""
+RAPI_ARGS=""
+CONFD_ARGS=""
diff --git a/doc/examples/ganeti.default-debug b/doc/examples/ganeti.default-debug
new file mode 100644
index 0000000000000000000000000000000000000000..f28adbe75cf2a943625892d08c39676352e63ff4
--- /dev/null
+++ b/doc/examples/ganeti.default-debug
@@ -0,0 +1,5 @@
+# Default arguments for Ganeti daemons (debug mode)
+NODED_ARGS="-d"
+MASTERD_ARGS="-d"
+RAPI_ARGS="-d"
+CONFD_ARGS="-d"
diff --git a/doc/install.rst b/doc/install.rst
index f8a022143c4619a0105d31896976cac98b7d6fc3..1f5b442222cf97d4b2631ba92014f2709aafdc27 100644
--- a/doc/install.rst
+++ b/doc/install.rst
@@ -549,6 +549,21 @@ pass ``--enabled-hypervisors=kvm`` to the init command.
 You can also invoke the command with the ``--help`` option in order to
 see all the possibilities.
 
+Hypervisor/Network/Cluster parameters
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Please note that the default hypervisor/network/cluster parameters may
+not be the correct ones for your environment. Carefully check them, and
+change them at cluster init time, or later with ``gnt-cluster modify``.
+
+Your instance types, networking environment, hypervisor type and version
+may all affect what kind of parameters should be used on your cluster.
+
+For example, kvm instances are by default configured to use a host
+kernel, and to be reached via serial console, which works nicely for Linux
+paravirtualized instances. If you want fully virtualized instances you
+may want to handle their kernel inside the instance, and to use VNC.
+
 Joining the nodes to the cluster
 ++++++++++++++++++++++++++++++++
 
diff --git a/lib/bootstrap.py b/lib/bootstrap.py
index 5b277e9d8b2c20a618e9c2b630be9d0606a4779f..a162897e493c9809e44d707d43c1867152accc58 100644
--- a/lib/bootstrap.py
+++ b/lib/bootstrap.py
@@ -107,7 +107,7 @@ def GenerateHmacKey(file_name):
   @param file_name: Path to output file
 
   """
-  utils.WriteFile(file_name, data=utils.GenerateSecret(), mode=0400)
+  utils.WriteFile(file_name, data="%s\n" % utils.GenerateSecret(), mode=0400)
 
 
 def _InitGanetiServerSetup(master_name):
diff --git a/lib/cli.py b/lib/cli.py
index 924208d3d588788bae9b72335b6c39c4d1cbf191..8e4f1f30855941c1092036ca82db6cd2c5464176 100644
--- a/lib/cli.py
+++ b/lib/cli.py
@@ -252,7 +252,6 @@ ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
 ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
 
 
-
 def _ExtractTagsObject(opts, args):
   """Extract the tag type object.
 
@@ -313,8 +312,8 @@ def ListTags(opts, args):
 
   """
   kind, name = _ExtractTagsObject(opts, args)
-  op = opcodes.OpGetTags(kind=kind, name=name)
-  result = SubmitOpCode(op)
+  cl = GetClient()
+  result = cl.QueryTags(kind, name)
   result = list(result)
   result.sort()
   for tag in result:
@@ -385,7 +384,7 @@ def _SplitKeyVal(opt, data):
   """
   kv_dict = {}
   if data:
-    for elem in data.split(","):
+    for elem in utils.UnescapeAndSplit(data, sep=","):
       if "=" in elem:
         key, val = elem.split("=", 1)
       else:
@@ -1581,6 +1580,12 @@ def GenerateTable(headers, fields, separator, data,
       args.append(hdr)
     result.append(format % tuple(args))
 
+  if separator is None:
+    assert len(mlens) == len(fields)
+
+    if fields and not numfields.Matches(fields[-1]):
+      mlens[-1] = 0
+
   for line in data:
     args = []
     if line is None:
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 2b1d611f78e656ceed8283b33ecc4d8198a5a7ef..fd75ecba69b3ac5cf2d1a959865e74becc85d286 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -1843,7 +1843,8 @@ class LURenameCluster(LogicalUnit):
       "NEW_NAME": self.op.name,
       }
     mn = self.cfg.GetMasterNode()
-    return env, [mn], [mn]
+    all_nodes = self.cfg.GetNodeList()
+    return env, [mn], all_nodes
 
   def CheckPrereq(self):
     """Verify that the passed name is a valid one.
@@ -5769,16 +5770,14 @@ class LUCreateInstance(LogicalUnit):
       # MAC address verification
       mac = nic.get("mac", constants.VALUE_AUTO)
       if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
-        if not utils.IsValidMac(mac.lower()):
-          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
-                                     mac, errors.ECODE_INVAL)
-        else:
-          try:
-            self.cfg.ReserveMAC(mac, self.proc.GetECId())
-          except errors.ReservationError:
-            raise errors.OpPrereqError("MAC address %s already in use"
-                                       " in cluster" % mac,
-                                       errors.ECODE_NOTUNIQUE)
+        mac = utils.NormalizeAndValidateMac(mac)
+
+        try:
+          self.cfg.ReserveMAC(mac, self.proc.GetECId())
+        except errors.ReservationError:
+          raise errors.OpPrereqError("MAC address %s already in use"
+                                     " in cluster" % mac,
+                                     errors.ECODE_NOTUNIQUE)
 
       # bridge verification
       bridge = nic.get("bridge", None)
@@ -7532,9 +7531,8 @@ class LUSetInstanceParams(LogicalUnit):
       if 'mac' in nic_dict:
         nic_mac = nic_dict['mac']
         if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
-          if not utils.IsValidMac(nic_mac):
-            raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac,
-                                       errors.ECODE_INVAL)
+          nic_mac = utils.NormalizeAndValidateMac(nic_mac)
+
         if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO:
           raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                      " modifying an existing nic",
diff --git a/lib/confd/client.py b/lib/confd/client.py
index e3288683f03d7ea071c35e2c462fb6bd7dd19cab..dba30ee0becc57a98c87c83fbcba0376c464a906 100644
--- a/lib/confd/client.py
+++ b/lib/confd/client.py
@@ -133,7 +133,8 @@ class ConfdClient:
     # pylint: disable-msg=W0201
     if not isinstance(peers, list):
       raise errors.ProgrammerError("peers must be a list")
-    self._peers = peers
+    # make a copy of peers, since we're going to shuffle the list, later
+    self._peers = list(peers)
 
   def _PackRequest(self, request, now=None):
     """Prepare a request to be sent on the wire.
diff --git a/lib/http/client.py b/lib/http/client.py
index 490e4da665b09d43be7f67aaeb545f576fee4587..5ca76a16375de8eb18a400b78b9cc94dffdeed8b 100644
--- a/lib/http/client.py
+++ b/lib/http/client.py
@@ -95,6 +95,14 @@ class HttpClientRequest(object):
     self.resp_headers = None
     self.resp_body = None
 
+  def __repr__(self):
+    status = ["%s.%s" % (self.__class__.__module__, self.__class__.__name__),
+              "%s:%s" % (self.host, self.port),
+              self.method,
+              self.path]
+
+    return "<%s at %#x>" % (" ".join(status), id(self))
+
 
 class _HttpClientToServerMessageWriter(http.HttpMessageWriter):
   pass
@@ -328,6 +336,12 @@ class _HttpClientPendingRequest(object):
     # Thread synchronization
     self.done = threading.Event()
 
+  def __repr__(self):
+    status = ["%s.%s" % (self.__class__.__module__, self.__class__.__name__),
+              "req=%r" % self.request]
+
+    return "<%s at %#x>" % (" ".join(status), id(self))
+
 
 class HttpClientWorker(workerpool.BaseWorker):
   """HTTP client worker class.
@@ -342,7 +356,8 @@ class HttpClientWorker(workerpool.BaseWorker):
 
 class HttpClientWorkerPool(workerpool.WorkerPool):
   def __init__(self, manager):
-    workerpool.WorkerPool.__init__(self, HTTP_CLIENT_THREADS,
+    workerpool.WorkerPool.__init__(self, "HttpClient",
+                                   HTTP_CLIENT_THREADS,
                                    HttpClientWorker)
     self.manager = manager
 
diff --git a/lib/hypervisor/hv_kvm.py b/lib/hypervisor/hv_kvm.py
index 92a2854e3323eba23f055d9c3d6df28089ffaa9e..28bcdc26e94dc389b3d58a13b00217bf28d8d45c 100644
--- a/lib/hypervisor/hv_kvm.py
+++ b/lib/hypervisor/hv_kvm.py
@@ -80,6 +80,8 @@ class KVMHypervisor(hv_base.BaseHypervisor):
 
   _MIGRATION_STATUS_RE = re.compile('Migration\s+status:\s+(\w+)',
                                     re.M | re.I)
+  _MIGRATION_INFO_MAX_BAD_ANSWERS = 5
+  _MIGRATION_INFO_RETRY_DELAY = 2
 
   _KVM_NETWORK_SCRIPT = constants.SYSCONFDIR + "/ganeti/kvm-vif-bridge"
 
@@ -675,26 +677,37 @@ class KVMHypervisor(hv_base.BaseHypervisor):
 
     info_command = 'info migrate'
     done = False
+    broken_answers = 0
     while not done:
       result = self._CallMonitorCommand(instance_name, info_command)
       match = self._MIGRATION_STATUS_RE.search(result.stdout)
       if not match:
-        raise errors.HypervisorError("Unknown 'info migrate' result: %s" %
-                                     result.stdout)
+        broken_answers += 1
+        if not result.stdout:
+          logging.info("KVM: empty 'info migrate' result")
+        else:
+          logging.warning("KVM: unknown 'info migrate' result: %s",
+                          result.stdout)
+        time.sleep(self._MIGRATION_INFO_RETRY_DELAY)
       else:
         status = match.group(1)
         if status == 'completed':
           done = True
         elif status == 'active':
-          time.sleep(2)
+          # reset the broken answers count
+          broken_answers = 0
+          time.sleep(self._MIGRATION_INFO_RETRY_DELAY)
         elif status == 'failed' or status == 'cancelled':
           if not live:
             self._CallMonitorCommand(instance_name, 'cont')
           raise errors.HypervisorError("Migration %s at the kvm level" %
                                        status)
         else:
-          logging.info("KVM: unknown migration status '%s'", status)
-          time.sleep(2)
+          logging.warning("KVM: unknown migration status '%s'", status)
+          broken_answers += 1
+          time.sleep(self._MIGRATION_INFO_RETRY_DELAY)
+      if broken_answers >= self._MIGRATION_INFO_MAX_BAD_ANSWERS:
+        raise errors.HypervisorError("Too many 'info migrate' broken answers")
 
     utils.KillProcess(pid)
     self._RemoveInstanceRuntimeFiles(pidfile, instance_name)
diff --git a/lib/jqueue.py b/lib/jqueue.py
index 2c2345baa470a94e1cdbddb6c4b4fd323e5bd4f7..7824d3f44f3f39db92c4cba2390082089b8f9b69 100644
--- a/lib/jqueue.py
+++ b/lib/jqueue.py
@@ -190,6 +190,13 @@ class _QueuedJob(object):
     # Condition to wait for changes
     self.change = threading.Condition(self.queue._lock)
 
+  def __repr__(self):
+    status = ["%s.%s" % (self.__class__.__module__, self.__class__.__name__),
+              "id=%s" % self.id,
+              "ops=%s" % ",".join([op.input.Summary() for op in self.ops])]
+
+    return "<%s at %#x>" % (" ".join(status), id(self))
+
   @classmethod
   def Restore(cls, queue, state):
     """Restore a _QueuedJob from serialized state:
@@ -430,8 +437,7 @@ class _JobQueueWorker(workerpool.BaseWorker):
     @param job: the job to be processed
 
     """
-    logging.info("Worker %s processing job %s",
-                  self.worker_id, job.id)
+    logging.info("Processing job %s", job.id)
     proc = mcpu.Processor(self.pool.queue.context, job.id)
     queue = job.queue
     try:
@@ -527,8 +533,7 @@ class _JobQueueWorker(workerpool.BaseWorker):
       finally:
         queue.release()
 
-      logging.info("Worker %s finished job %s, status = %s",
-                   self.worker_id, job_id, status)
+      logging.info("Finished job %s, status = %s", job_id, status)
 
 
 class _JobQueueWorkerPool(workerpool.WorkerPool):
@@ -536,7 +541,8 @@ class _JobQueueWorkerPool(workerpool.WorkerPool):
 
   """
   def __init__(self, queue):
-    super(_JobQueueWorkerPool, self).__init__(JOBQUEUE_THREADS,
+    super(_JobQueueWorkerPool, self).__init__("JobQueue",
+                                              JOBQUEUE_THREADS,
                                               _JobQueueWorker)
     self.queue = queue
 
@@ -1315,7 +1321,7 @@ class JobQueue(object):
     all_job_ids = self._GetJobIDsUnlocked(archived=False)
     pending = []
     for idx, job_id in enumerate(all_job_ids):
-      last_touched = idx
+      last_touched = idx + 1
 
       # Not optimal because jobs could be pending
       # TODO: Measure average duration for job archival and take number of
@@ -1345,7 +1351,7 @@ class JobQueue(object):
     if pending:
       archived_count += self._ArchiveJobsUnlocked(pending)
 
-    return (archived_count, len(all_job_ids) - last_touched - 1)
+    return (archived_count, len(all_job_ids) - last_touched)
 
   @staticmethod
   def _GetJobInfoUnlocked(job, fields):
diff --git a/lib/locking.py b/lib/locking.py
index 6ba74f2ea5c6159e07778af7b44c5037f1fbdf1c..f30e8b703d994debd1f3983484e38ba19938f009 100644
--- a/lib/locking.py
+++ b/lib/locking.py
@@ -775,7 +775,9 @@ class LockSet:
   def _release_and_delete_owned(self):
     """Release and delete all resources owned by the current thread"""
     for lname in self._list_owned():
-      self.__lockdict[lname].release()
+      lock = self.__lockdict[lname]
+      if lock._is_owned():
+        lock.release()
       self._del_owned(name=lname)
 
   def __names(self):
@@ -839,8 +841,6 @@ class LockSet:
         # Support passing in a single resource to acquire rather than many
         if isinstance(names, basestring):
           names = [names]
-        else:
-          names = sorted(names)
 
         return self.__acquire_inner(names, False, shared,
                                     running_timeout.Remaining, test_notify)
@@ -889,11 +889,11 @@ class LockSet:
 
     # First we look the locks up on __lockdict. We have no way of being sure
     # they will still be there after, but this makes it a lot faster should
-    # just one of them be the already wrong
-    for lname in utils.UniqueSequence(names):
+    # just one of them be the already wrong. Using a sorted sequence to prevent
+    # deadlocks.
+    for lname in sorted(utils.UniqueSequence(names)):
       try:
         lock = self.__lockdict[lname] # raises KeyError if lock is not there
-        acquire_list.append((lname, lock))
       except KeyError:
         if want_all:
           # We are acquiring all the set, it doesn't matter if this particular
@@ -902,6 +902,8 @@ class LockSet:
 
         raise errors.LockError("Non-existing lock in set (%s)" % lname)
 
+      acquire_list.append((lname, lock))
+
     # This will hold the locknames we effectively acquired.
     acquired = set()
 
diff --git a/lib/luxi.py b/lib/luxi.py
index ad11aef1d43b431488e4b49e7c25280e4b564639..f588e81570c5396db9f76913deb684c670341481 100644
--- a/lib/luxi.py
+++ b/lib/luxi.py
@@ -57,6 +57,7 @@ REQ_QUERY_NODES = "QueryNodes"
 REQ_QUERY_EXPORTS = "QueryExports"
 REQ_QUERY_CONFIG_VALUES = "QueryConfigValues"
 REQ_QUERY_CLUSTER_INFO = "QueryClusterInfo"
+REQ_QUERY_TAGS = "QueryTags"
 REQ_QUEUE_SET_DRAIN_FLAG = "SetDrainFlag"
 REQ_SET_WATCHER_PAUSE = "SetWatcherPause"
 
@@ -444,5 +445,8 @@ class Client(object):
   def QueryConfigValues(self, fields):
     return self.CallMethod(REQ_QUERY_CONFIG_VALUES, fields)
 
+  def QueryTags(self, kind, name):
+    return self.CallMethod(REQ_QUERY_TAGS, (kind, name))
+
 
 # TODO: class Server(object)
diff --git a/lib/mcpu.py b/lib/mcpu.py
index af4ff1a5ac0fc5747250fe386aa6abe832289a53..933d59636b6cdd6c78b8d4fb0f2e3a27dba394ef 100644
--- a/lib/mcpu.py
+++ b/lib/mcpu.py
@@ -275,7 +275,7 @@ class Processor(object):
     elif isinstance(names, basestring):
       parts.append(names)
     else:
-      parts.append(",".join(names))
+      parts.append(",".join(sorted(names)))
 
     if shared:
       parts.append("shared")
diff --git a/lib/utils.py b/lib/utils.py
index 97c28e23ebc616446d4d0251ca02953fd7fa8fe7..6e3340a77bacfc9d8a5dabc22f68247bba955305 100644
--- a/lib/utils.py
+++ b/lib/utils.py
@@ -1398,20 +1398,26 @@ def UniqueSequence(seq):
   return [i for i in seq if i not in seen and not seen.add(i)]
 
 
-def IsValidMac(mac):
-  """Predicate to check if a MAC address is valid.
+def NormalizeAndValidateMac(mac):
+  """Normalize and check whether a MAC address is valid.
 
   Checks whether the supplied MAC address is formally correct, only
-  accepts colon separated format.
+  accepts colon separated format. Normalize it to all lower.
 
   @type mac: str
   @param mac: the MAC to be validated
-  @rtype: boolean
-  @return: True is the MAC seems valid
+  @rtype: str
+  @return: the normalized and validated MAC address
+
+  @raise errors.OpPrereqError: If the MAC isn't valid
 
   """
-  mac_check = re.compile("^([0-9a-f]{2}(:|$)){6}$")
-  return mac_check.match(mac) is not None
+  mac_check = re.compile("^([0-9a-f]{2}(:|$)){6}$", re.I)
+  if not mac_check.match(mac):
+    raise errors.OpPrereqError("Invalid MAC address specified: %s" %
+                               mac, errors.ECODE_INVAL)
+
+  return mac.lower()
 
 
 def TestDelay(duration):
@@ -1891,6 +1897,48 @@ def SafeEncode(text):
   return resu
 
 
+def UnescapeAndSplit(text, sep=","):
+  """Split and unescape a string based on a given separator.
+
+  This function splits a string based on a separator where the
+  separator itself can be escaped in order to appear inside an
+  element. The escaping rules are (assuming comma being the
+  separator):
+    - a plain , separates the elements
+    - a sequence \\\\, (double backslash plus comma) is handled as a
+      backslash plus a separator comma
+    - a sequence \, (backslash plus comma) is handled as a
+      non-separator comma
+
+  @type text: string
+  @param text: the string to split
+  @type sep: string
+  @param text: the separator
+  @rtype: string
+  @return: a list of strings
+
+  """
+  # we split the list by sep (with no escaping at this stage)
+  slist = text.split(sep)
+  # next, we revisit the elements and if any of them ended with an odd
+  # number of backslashes, then we join it with the next
+  rlist = []
+  while slist:
+    e1 = slist.pop(0)
+    if e1.endswith("\\"):
+      num_b = len(e1) - len(e1.rstrip("\\"))
+      if num_b % 2 == 1:
+        e2 = slist.pop(0)
+        # here the backslashes remain (all), and will be reduced in
+        # the next step
+        rlist.append(e1 + sep + e2)
+        continue
+    rlist.append(e1)
+  # finally, replace backslash-something with something
+  rlist = [re.sub(r"\\(.)", r"\1", v) for v in rlist]
+  return rlist
+
+
 def CommaJoin(names):
   """Nicely join a set of identifiers.
 
diff --git a/lib/workerpool.py b/lib/workerpool.py
index 2d3937a1648c247b6d9d4f1cb02c95f4e7cb05d3..1ec89af67a1a913eba6240696b60089957fd1644 100644
--- a/lib/workerpool.py
+++ b/lib/workerpool.py
@@ -42,9 +42,8 @@ class BaseWorker(threading.Thread, object):
     @param worker_id: identifier for this worker
 
     """
-    super(BaseWorker, self).__init__()
+    super(BaseWorker, self).__init__(name=worker_id)
     self.pool = pool
-    self.worker_id = worker_id
     self._current_task = None
 
   def ShouldTerminate(self):
@@ -89,12 +88,12 @@ class BaseWorker(threading.Thread, object):
 
           # We only wait if there's no task for us.
           if not pool._tasks:
-            logging.debug("Worker %s: waiting for tasks", self.worker_id)
+            logging.debug("Waiting for tasks")
 
             # wait() releases the lock and sleeps until notified
             pool._pool_to_worker.wait()
 
-            logging.debug("Worker %s: notified while waiting", self.worker_id)
+            logging.debug("Notified while waiting")
 
             # Were we woken up in order to terminate?
             if pool._ShouldWorkerTerminateUnlocked(self):
@@ -114,14 +113,11 @@ class BaseWorker(threading.Thread, object):
 
         # Run the actual task
         try:
-          logging.debug("Worker %s: starting task %r",
-                        self.worker_id, self._current_task)
+          logging.debug("Starting task %r", self._current_task)
           self.RunTask(*self._current_task)
-          logging.debug("Worker %s: done with task %r",
-                        self.worker_id, self._current_task)
+          logging.debug("Done with task %r", self._current_task)
         except: # pylint: disable-msg=W0702
-          logging.error("Worker %s: Caught unhandled exception",
-                        self.worker_id, exc_info=True)
+          logging.exception("Caught unhandled exception")
       finally:
         # Notify pool
         pool._lock.acquire()
@@ -132,7 +128,7 @@ class BaseWorker(threading.Thread, object):
         finally:
           pool._lock.release()
 
-    logging.debug("Worker %s: terminates", self.worker_id)
+    logging.debug("Terminates")
 
   def RunTask(self, *args):
     """Function called to start a task.
@@ -153,7 +149,7 @@ class WorkerPool(object):
   guaranteed to finish in the same order.
 
   """
-  def __init__(self, num_workers, worker_class):
+  def __init__(self, name, num_workers, worker_class):
     """Constructor for worker pool.
 
     @param num_workers: number of workers to be started
@@ -168,6 +164,7 @@ class WorkerPool(object):
     self._pool_to_worker = threading.Condition(self._lock)
     self._worker_to_pool = threading.Condition(self._lock)
     self._worker_class = worker_class
+    self._name = name
     self._last_worker_id = 0
     self._workers = []
     self._quiescing = False
@@ -253,7 +250,8 @@ class WorkerPool(object):
 
     """
     self._last_worker_id += 1
-    return self._last_worker_id
+
+    return "%s%d" % (self._name, self._last_worker_id)
 
   def _ResizeUnlocked(self, num_workers):
     """Changes the number of workers.
diff --git a/man/gnt-instance.sgml b/man/gnt-instance.sgml
index d56a242d05fcd6fa391e280cb0d80d48b64af734..3c3373f1c3607f4bd2569b436b84420099ddea6e 100644
--- a/man/gnt-instance.sgml
+++ b/man/gnt-instance.sgml
@@ -2,7 +2,7 @@
 
   <!-- Fill in your name for FIRSTNAME and SURNAME. -->
   <!-- Please adjust the date whenever revising the manpage. -->
-  <!ENTITY dhdate      "<date>February 11, 2009</date>">
+  <!ENTITY dhdate      "<date>January 22, 2010</date>">
   <!-- SECTION should be 1-8, maybe w/ subsection other parameters are
        allowed: see man(7), man(1). -->
   <!ENTITY dhsection   "<manvolnum>8</manvolnum>">
@@ -22,6 +22,7 @@
       <year>2007</year>
       <year>2008</year>
       <year>2009</year>
+      <year>2010</year>
       <holder>Google Inc.</holder>
     </copyright>
     &dhdate;
@@ -1491,6 +1492,10 @@ instance5: 11225
             <arg>--primary</arg>
             <arg>--secondary</arg>
             <arg>--all</arg>
+            <arg>--tags</arg>
+            <arg>--node-tags</arg>
+            <arg>--pri-node-tags</arg>
+            <arg>--sec-node-tags</arg>
           </group>
           <sbr>
           <arg>-H <option>key=value...</option></arg>
@@ -1544,6 +1549,36 @@ instance5: 11225
                 arguments accepted)</simpara>
               </listitem>
             </varlistentry>
+            <varlistentry>
+              <term>--tags</term>
+              <listitem>
+                <simpara>will start all instances in the cluster with
+                the tags given as arguments</simpara>
+              </listitem>
+            </varlistentry>
+            <varlistentry>
+              <term>--node-tags</term>
+              <listitem>
+                <simpara>will start all instances in the cluster on
+                nodes with the tags given as arguments</simpara>
+              </listitem>
+            </varlistentry>
+            <varlistentry>
+              <term>--pri-node-tags</term>
+              <listitem>
+                <simpara>will start all instances in the cluster on
+                primary nodes with the tags given as
+                arguments</simpara>
+              </listitem>
+            </varlistentry>
+            <varlistentry>
+              <term>--sec-node-tags</term>
+              <listitem>
+                <simpara>will start all instances in the cluster on
+                secondary nodes with the tags given as
+                arguments</simpara>
+              </listitem>
+            </varlistentry>
           </variablelist>
         </para>
 
@@ -1618,6 +1653,10 @@ instance5: 11225
             <arg>--primary</arg>
             <arg>--secondary</arg>
             <arg>--all</arg>
+            <arg>--tags</arg>
+            <arg>--node-tags</arg>
+            <arg>--pri-node-tags</arg>
+            <arg>--sec-node-tags</arg>
           </group>
           <sbr>
           <arg>--submit</arg>
@@ -1642,8 +1681,10 @@ instance5: 11225
 
         <para>
           The <option>--instance</option>, <option>--node</option>,
-          <option>--primary</option>, <option>--secondary</option> and
-          <option>--all</option> options are similar as for the
+          <option>--primary</option>, <option>--secondary</option>,
+          <option>--all</option>, <option>--tags</option>,
+          <option>--node-tags</option>, <option>--pri-node-tags</option> and
+          <option>--sec-node-tags</option> options are similar to those for the
           <command>startup</command> command and they influence the
           actual instances being shutdown.
         </para>
@@ -1685,6 +1726,10 @@ instance5: 11225
             <arg>--primary</arg>
             <arg>--secondary</arg>
             <arg>--all</arg>
+            <arg>--tags</arg>
+            <arg>--node-tags</arg>
+            <arg>--pri-node-tags</arg>
+            <arg>--sec-node-tags</arg>
           </group>
           <sbr>
           <arg>--submit</arg>
@@ -1711,8 +1756,10 @@ instance5: 11225
 
         <para>
           The <option>--instance</option>, <option>--node</option>,
-          <option>--primary</option>, <option>--secondary</option> and
-          <option>--all</option> options are similar as for the
+          <option>--primary</option>, <option>--secondary</option>,
+          <option>--all</option>, <option>--tags</option>,
+          <option>--node-tags</option>, <option>--pri-node-tags</option> and
+          <option>--sec-node-tags</option> options are similar to those for the
           <command>startup</command> command and they influence the
           actual instances being rebooted.
         </para>
diff --git a/scripts/gnt-instance b/scripts/gnt-instance
index 56eb510314bcb4c19c09afabdcfaaf9b2ae226de..dad92f09456c0230a4c0a35967f3e191fafd3f01 100755
--- a/scripts/gnt-instance
+++ b/scripts/gnt-instance
@@ -42,7 +42,16 @@ _SHUTDOWN_CLUSTER = "cluster"
 _SHUTDOWN_NODES_BOTH = "nodes"
 _SHUTDOWN_NODES_PRI = "nodes-pri"
 _SHUTDOWN_NODES_SEC = "nodes-sec"
+_SHUTDOWN_NODES_BOTH_BY_TAGS = "nodes-by-tags"
+_SHUTDOWN_NODES_PRI_BY_TAGS = "nodes-pri-by-tags"
+_SHUTDOWN_NODES_SEC_BY_TAGS = "nodes-sec-by-tags"
 _SHUTDOWN_INSTANCES = "instances"
+_SHUTDOWN_INSTANCES_BY_TAGS = "instances-by-tags"
+
+_SHUTDOWN_NODES_TAGS_MODES = (
+    _SHUTDOWN_NODES_BOTH_BY_TAGS,
+    _SHUTDOWN_NODES_PRI_BY_TAGS,
+    _SHUTDOWN_NODES_SEC_BY_TAGS)
 
 
 _VALUE_TRUE = "true"
@@ -77,6 +86,7 @@ def _ExpandMultiNames(mode, names, client=None):
 
   """
   # pylint: disable-msg=W0142
+
   if client is None:
     client = GetClient()
   if mode == _SHUTDOWN_CLUSTER:
@@ -88,31 +98,43 @@ def _ExpandMultiNames(mode, names, client=None):
 
   elif mode in (_SHUTDOWN_NODES_BOTH,
                 _SHUTDOWN_NODES_PRI,
-                _SHUTDOWN_NODES_SEC):
-    if not names:
-      raise errors.OpPrereqError("No node names passed", errors.ECODE_INVAL)
-    ndata = client.QueryNodes(names, ["name", "pinst_list", "sinst_list"],
+                _SHUTDOWN_NODES_SEC) + _SHUTDOWN_NODES_TAGS_MODES:
+    if mode in _SHUTDOWN_NODES_TAGS_MODES:
+      if not names:
+        raise errors.OpPrereqError("No node tags passed", errors.ECODE_INVAL)
+      ndata = client.QueryNodes([], ["name", "pinst_list",
+                                     "sinst_list", "tags"], False)
+      ndata = [row for row in ndata if set(row[3]).intersection(names)]
+    else:
+      if not names:
+        raise errors.OpPrereqError("No node names passed", errors.ECODE_INVAL)
+      ndata = client.QueryNodes(names, ["name", "pinst_list", "sinst_list"],
                               False)
+
     ipri = [row[1] for row in ndata]
     pri_names = list(itertools.chain(*ipri))
     isec = [row[2] for row in ndata]
     sec_names = list(itertools.chain(*isec))
-    if mode == _SHUTDOWN_NODES_BOTH:
+    if mode in (_SHUTDOWN_NODES_BOTH, _SHUTDOWN_NODES_BOTH_BY_TAGS):
       inames = pri_names + sec_names
-    elif mode == _SHUTDOWN_NODES_PRI:
+    elif mode in (_SHUTDOWN_NODES_PRI, _SHUTDOWN_NODES_PRI_BY_TAGS):
       inames = pri_names
-    elif mode == _SHUTDOWN_NODES_SEC:
+    elif mode in (_SHUTDOWN_NODES_SEC, _SHUTDOWN_NODES_SEC_BY_TAGS):
       inames = sec_names
     else:
       raise errors.ProgrammerError("Unhandled shutdown type")
-
   elif mode == _SHUTDOWN_INSTANCES:
     if not names:
       raise errors.OpPrereqError("No instance names passed",
                                  errors.ECODE_INVAL)
     idata = client.QueryInstances(names, ["name"], False)
     inames = [row[0] for row in idata]
-
+  elif mode == _SHUTDOWN_INSTANCES_BY_TAGS:
+    if not names:
+      raise errors.OpPrereqError("No instance tags passed",
+                                 errors.ECODE_INVAL)
+    idata = client.QueryInstances([], ["name", "tags"], False)
+    inames = [row[0] for row in idata if set(row[1]).intersection(names)]
   else:
     raise errors.OpPrereqError("Unknown mode '%s'" % mode, errors.ECODE_INVAL)
 
@@ -1269,6 +1291,25 @@ m_inst_opt = cli_option("--instance", dest="multi_mode",
                         help="Filter by instance name [default]",
                         const=_SHUTDOWN_INSTANCES, action="store_const")
 
+m_node_tags_opt = cli_option("--node-tags", dest="multi_mode",
+                             help="Filter by node tag",
+                             const=_SHUTDOWN_NODES_BOTH_BY_TAGS,
+                             action="store_const")
+
+m_pri_node_tags_opt = cli_option("--pri-node-tags", dest="multi_mode",
+                                 help="Filter by primary node tag",
+                                 const=_SHUTDOWN_NODES_PRI_BY_TAGS,
+                                 action="store_const")
+
+m_sec_node_tags_opt = cli_option("--sec-node-tags", dest="multi_mode",
+                                 help="Filter by secondary node tag",
+                                 const=_SHUTDOWN_NODES_SEC_BY_TAGS,
+                                 action="store_const")
+
+m_inst_tags_opt = cli_option("--tags", dest="multi_mode",
+                             help="Filter by instance tag",
+                             const=_SHUTDOWN_INSTANCES_BY_TAGS,
+                             action="store_const")
 
 # this is defined separately due to readability only
 add_opts = [
@@ -1344,7 +1385,8 @@ commands = {
   'reinstall': (
     ReinstallInstance, [ArgInstance()],
     [FORCE_OPT, OS_OPT, FORCE_VARIANT_OPT, m_force_multi, m_node_opt,
-     m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_inst_opt, SELECT_OS_OPT,
+     m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_inst_opt, m_node_tags_opt,
+     m_pri_node_tags_opt, m_sec_node_tags_opt, m_inst_tags_opt, SELECT_OS_OPT,
      SUBMIT_OPT],
     "[-f] <instance>", "Reinstall a stopped instance"),
   'remove': (
@@ -1369,19 +1411,22 @@ commands = {
   'shutdown': (
     GenericManyOps("shutdown", _ShutdownInstance), [ArgInstance()],
     [m_node_opt, m_pri_node_opt, m_sec_node_opt, m_clust_opt,
-     m_inst_opt, m_force_multi, TIMEOUT_OPT, SUBMIT_OPT],
+     m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
+     m_inst_tags_opt, m_inst_opt, m_force_multi, TIMEOUT_OPT, SUBMIT_OPT],
     "<instance>", "Stops an instance"),
   'startup': (
     GenericManyOps("startup", _StartupInstance), [ArgInstance()],
-    [FORCE_OPT, m_force_multi, m_node_opt, m_pri_node_opt,
-     m_sec_node_opt, m_clust_opt, m_inst_opt, SUBMIT_OPT, HVOPTS_OPT,
+    [FORCE_OPT, m_force_multi, m_node_opt, m_pri_node_opt, m_sec_node_opt,
+     m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
+     m_inst_tags_opt, m_clust_opt, m_inst_opt, SUBMIT_OPT, HVOPTS_OPT,
      BACKEND_OPT],
     "<instance>", "Starts an instance"),
   'reboot': (
     GenericManyOps("reboot", _RebootInstance), [ArgInstance()],
     [m_force_multi, REBOOT_TYPE_OPT, IGNORE_SECONDARIES_OPT, m_node_opt,
      m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_inst_opt, SUBMIT_OPT,
-     SHUTDOWN_TIMEOUT_OPT],
+     m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
+     m_inst_tags_opt, SHUTDOWN_TIMEOUT_OPT],
     "<instance>", "Reboots an instance"),
   'activate-disks': (
     ActivateDisks, ARGS_ONE_INSTANCE, [SUBMIT_OPT, IGNORE_SIZE_OPT],
diff --git a/test/ganeti.cli_unittest.py b/test/ganeti.cli_unittest.py
index 5fb652eb4a89fdc436713aee4797d2dd58749ce6..b768f4a03b091d5bcf795d9aded0369da8965c2f 100755
--- a/test/ganeti.cli_unittest.py
+++ b/test/ganeti.cli_unittest.py
@@ -124,5 +124,127 @@ class TestToStream(unittest.TestCase):
       cli._ToStream(buf, "foo %s %s", "a", "b")
       self.failUnlessEqual(buf.getvalue(), "foo a b\n")
 
+
+class TestGenerateTable(unittest.TestCase):
+  HEADERS = dict([("f%s" % i, "Field%s" % i) for i in range(5)])
+
+  FIELDS1 = ["f1", "f2"]
+  DATA1 = [
+    ["abc", 1234],
+    ["foobar", 56],
+    ["b", -14],
+    ]
+
+  def _test(self, headers, fields, separator, data,
+            numfields, unitfields, units, expected):
+    table = cli.GenerateTable(headers, fields, separator, data,
+                              numfields=numfields, unitfields=unitfields,
+                              units=units)
+    self.assertEqual(table, expected)
+
+  def testPlain(self):
+    exp = [
+      "Field1 Field2",
+      "abc    1234",
+      "foobar 56",
+      "b      -14",
+      ]
+    self._test(self.HEADERS, self.FIELDS1, None, self.DATA1,
+               None, None, "m", exp)
+
+  def testNoFields(self):
+    self._test(self.HEADERS, [], None, [[], []],
+               None, None, "m", ["", "", ""])
+    self._test(None, [], None, [[], []],
+               None, None, "m", ["", ""])
+
+  def testSeparator(self):
+    for sep in ["#", ":", ",", "^", "!", "%", "|", "###", "%%", "!!!", "||"]:
+      exp = [
+        "Field1%sField2" % sep,
+        "abc%s1234" % sep,
+        "foobar%s56" % sep,
+        "b%s-14" % sep,
+        ]
+      self._test(self.HEADERS, self.FIELDS1, sep, self.DATA1,
+                 None, None, "m", exp)
+
+  def testNoHeader(self):
+    exp = [
+      "abc    1234",
+      "foobar 56",
+      "b      -14",
+      ]
+    self._test(None, self.FIELDS1, None, self.DATA1,
+               None, None, "m", exp)
+
+  def testUnknownField(self):
+    headers = {
+      "f1": "Field1",
+      }
+    exp = [
+      "Field1 UNKNOWN",
+      "abc    1234",
+      "foobar 56",
+      "b      -14",
+      ]
+    self._test(headers, ["f1", "UNKNOWN"], None, self.DATA1,
+               None, None, "m", exp)
+
+  def testNumfields(self):
+    fields = ["f1", "f2", "f3"]
+    data = [
+      ["abc", 1234, 0],
+      ["foobar", 56, 3],
+      ["b", -14, "-"],
+      ]
+    exp = [
+      "Field1 Field2 Field3",
+      "abc      1234      0",
+      "foobar     56      3",
+      "b         -14      -",
+      ]
+    self._test(self.HEADERS, fields, None, data,
+               ["f2", "f3"], None, "m", exp)
+
+  def testUnitfields(self):
+    expnosep = [
+      "Field1 Field2 Field3",
+      "abc      1234     0M",
+      "foobar     56     3M",
+      "b         -14      -",
+      ]
+
+    expsep = [
+      "Field1:Field2:Field3",
+      "abc:1234:0M",
+      "foobar:56:3M",
+      "b:-14:-",
+      ]
+
+    for sep, expected in [(None, expnosep), (":", expsep)]:
+      fields = ["f1", "f2", "f3"]
+      data = [
+        ["abc", 1234, 0],
+        ["foobar", 56, 3],
+        ["b", -14, "-"],
+        ]
+      self._test(self.HEADERS, fields, sep, data,
+                 ["f2", "f3"], ["f3"], "h", expected)
+
+  def testUnusual(self):
+    data = [
+      ["%", "xyz"],
+      ["%%", "abc"],
+      ]
+    exp = [
+      "Field1 Field2",
+      "%      xyz",
+      "%%     abc",
+      ]
+    self._test(self.HEADERS, ["f1", "f2"], None, data,
+               None, None, "m", exp)
+
+
 if __name__ == '__main__':
   testutils.GanetiTestProgram()
diff --git a/test/ganeti.locking_unittest.py b/test/ganeti.locking_unittest.py
index 4da57e79481c46178d780819ac9dac19897ace3f..170e8197b39b70fcca9de631dbca8b8c76b3b187 100755
--- a/test/ganeti.locking_unittest.py
+++ b/test/ganeti.locking_unittest.py
@@ -604,7 +604,7 @@ class TestSharedLock(_ThreadedTestCase):
 
   @_Repeat
   def testMixedAcquireTimeout(self):
-    sync = threading.Condition()
+    sync = threading.Event()
 
     def _AcquireShared(ev):
       if not self.sl.acquire(shared=1, timeout=None):
@@ -615,12 +615,8 @@ class TestSharedLock(_ThreadedTestCase):
       # Notify main thread
       ev.set()
 
-      # Wait for notification
-      sync.acquire()
-      try:
-        sync.wait()
-      finally:
-        sync.release()
+      # Wait for notification from main thread
+      sync.wait()
 
       # Release lock
       self.sl.release()
@@ -641,7 +637,7 @@ class TestSharedLock(_ThreadedTestCase):
     self.failIf(self.sl.acquire(shared=0, timeout=0.02))
 
     # Acquire exclusive without timeout
-    exclsync = threading.Condition()
+    exclsync = threading.Event()
     exclev = threading.Event()
 
     def _AcquireExclusive():
@@ -653,11 +649,8 @@ class TestSharedLock(_ThreadedTestCase):
       # Notify main thread
       exclev.set()
 
-      exclsync.acquire()
-      try:
-        exclsync.wait()
-      finally:
-        exclsync.release()
+      # Wait for notification from main thread
+      exclsync.wait()
 
       self.sl.release()
 
@@ -667,11 +660,7 @@ class TestSharedLock(_ThreadedTestCase):
     self.failIf(self.sl.acquire(shared=0, timeout=0.02))
 
     # Make all shared holders release their locks
-    sync.acquire()
-    try:
-      sync.notifyAll()
-    finally:
-      sync.release()
+    sync.set()
 
     # Wait for exclusive acquire to succeed
     exclev.wait()
@@ -690,11 +679,7 @@ class TestSharedLock(_ThreadedTestCase):
       self._addThread(target=_AcquireSharedSimple)
 
     # Tell exclusive lock to release
-    exclsync.acquire()
-    try:
-      exclsync.notifyAll()
-    finally:
-      exclsync.release()
+    exclsync.set()
 
     # Wait for everything to finish
     self._waitThreads()
diff --git a/test/ganeti.utils_unittest.py b/test/ganeti.utils_unittest.py
index fe7464de5d38ae62a60058867badbf382f946109..f0763550d12f16031d0f1715933509107d35e0f9 100755
--- a/test/ganeti.utils_unittest.py
+++ b/test/ganeti.utils_unittest.py
@@ -45,7 +45,8 @@ from ganeti.utils import IsProcessAlive, RunCmd, \
      ParseUnit, AddAuthorizedKey, RemoveAuthorizedKey, \
      ShellQuote, ShellQuoteArgs, TcpPing, ListVisibleFiles, \
      SetEtcHostsEntry, RemoveEtcHostsEntry, FirstFree, OwnIpAddress, \
-     TailFile, ForceDictType, SafeEncode, IsNormAbsPath, FormatTime
+     TailFile, ForceDictType, SafeEncode, IsNormAbsPath, FormatTime, \
+     UnescapeAndSplit
 
 from ganeti.errors import LockError, UnitParseError, GenericError, \
      ProgrammerError
@@ -1053,5 +1054,36 @@ class TestFormatTime(unittest.TestCase):
     FormatTime(int(time.time()))
 
 
+class TestUnescapeAndSplit(unittest.TestCase):
+  """Testing case for UnescapeAndSplit"""
+
+  def setUp(self):
+    # testing more that one separator for regexp safety
+    self._seps = [",", "+", "."]
+
+  def testSimple(self):
+    a = ["a", "b", "c", "d"]
+    for sep in self._seps:
+      self.failUnlessEqual(UnescapeAndSplit(sep.join(a), sep=sep), a)
+
+  def testEscape(self):
+    for sep in self._seps:
+      a = ["a", "b\\" + sep + "c", "d"]
+      b = ["a", "b" + sep + "c", "d"]
+      self.failUnlessEqual(UnescapeAndSplit(sep.join(a), sep=sep), b)
+
+  def testDoubleEscape(self):
+    for sep in self._seps:
+      a = ["a", "b\\\\", "c", "d"]
+      b = ["a", "b\\", "c", "d"]
+      self.failUnlessEqual(UnescapeAndSplit(sep.join(a), sep=sep), b)
+
+  def testThreeEscape(self):
+    for sep in self._seps:
+      a = ["a", "b\\\\\\" + sep + "c", "d"]
+      b = ["a", "b\\" + sep + "c", "d"]
+      self.failUnlessEqual(UnescapeAndSplit(sep.join(a), sep=sep), b)
+
+
 if __name__ == '__main__':
   testutils.GanetiTestProgram()
diff --git a/test/ganeti.workerpool_unittest.py b/test/ganeti.workerpool_unittest.py
index a91858eb21c3a5bfea7f0424a59a400c016ffc73..e9cc30c4cee4efcdd88e4852f8b921e256119bd5 100755
--- a/test/ganeti.workerpool_unittest.py
+++ b/test/ganeti.workerpool_unittest.py
@@ -62,7 +62,7 @@ class TestWorkerpool(unittest.TestCase):
   """Workerpool tests"""
 
   def testDummy(self):
-    wp = workerpool.WorkerPool(3, DummyBaseWorker)
+    wp = workerpool.WorkerPool("Test", 3, DummyBaseWorker)
     try:
       self._CheckWorkerCount(wp, 3)
 
@@ -75,7 +75,7 @@ class TestWorkerpool(unittest.TestCase):
       self._CheckWorkerCount(wp, 0)
 
   def testNoTasks(self):
-    wp = workerpool.WorkerPool(3, DummyBaseWorker)
+    wp = workerpool.WorkerPool("Test", 3, DummyBaseWorker)
     try:
       self._CheckWorkerCount(wp, 3)
       self._CheckNoTasks(wp)
@@ -84,7 +84,7 @@ class TestWorkerpool(unittest.TestCase):
       self._CheckWorkerCount(wp, 0)
 
   def testNoTasksQuiesce(self):
-    wp = workerpool.WorkerPool(3, DummyBaseWorker)
+    wp = workerpool.WorkerPool("Test", 3, DummyBaseWorker)
     try:
       self._CheckWorkerCount(wp, 3)
       self._CheckNoTasks(wp)
@@ -97,7 +97,7 @@ class TestWorkerpool(unittest.TestCase):
   def testChecksum(self):
     # Tests whether all tasks are run and, since we're only using a single
     # thread, whether everything is started in order.
-    wp = workerpool.WorkerPool(1, ChecksumBaseWorker)
+    wp = workerpool.WorkerPool("Test", 1, ChecksumBaseWorker)
     try:
       self._CheckWorkerCount(wp, 1)
 
diff --git a/test/testutils.py b/test/testutils.py
index fddeda73d1a16bf0781c05091132557586a71bbf..fd95c76dce9f2e3ca952fc38c4fcc5486dc46c84 100644
--- a/test/testutils.py
+++ b/test/testutils.py
@@ -22,6 +22,7 @@
 """Utilities for unit testing"""
 
 import os
+import sys
 import stat
 import tempfile
 import unittest
@@ -40,6 +41,10 @@ class GanetiTestProgram(unittest.TestProgram):
 
     """
     logging.basicConfig(filename=os.devnull)
+
+    sys.stderr.write("Running %s\n" % self.progName)
+    sys.stderr.flush()
+
     return unittest.TestProgram.runTests(self)