diff --git a/NEWS b/NEWS
index 98627bced12d6e134cf65b9880bbfbedf43453da..13d1cb8fb52d6ad583f44d79c4d121e030b4eb96 100644
--- a/NEWS
+++ b/NEWS
@@ -9,10 +9,10 @@ Version 2.6.0 beta 1
 - Deprecated ``admin_up`` field. Instead, ``admin_state`` is introduced,
   with 3 possible values -- ``up``,``down`` and ``offline``.
 
-Version 2.5.0 rc2
+Version 2.5.0 rc4
 -----------------
 
-*(Released Tue, 18 Oct 2011)*
+*(Released Thu, 27 Oct 2011)*
 
 Incompatible/important changes and bugfixes
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -127,6 +127,8 @@ Misc
 
   - :doc:`RAPI <rapi>` documentation now has detailed parameter
     descriptions.
+  - Some opcode/job results are now also documented, see :doc:`RAPI
+    <rapi>`.
 
 - A lockset's internal lock is now also visible in lock monitor.
 - Log messages from job queue workers now contain information about the
@@ -137,6 +139,22 @@ Misc
 - DRBD metadata volumes are overwritten with zeros during disk creation.
 
 
+Version 2.5.0 rc3
+-----------------
+
+*(Released Wed, 26 Oct 2011)*
+
+This was the third release candidate of the 2.5 series.
+
+
+Version 2.5.0 rc2
+-----------------
+
+*(Released Tue, 18 Oct 2011)*
+
+This was the second release candidate of the 2.5 series.
+
+
 Version 2.5.0 rc1
 -----------------
 
diff --git a/configure.ac b/configure.ac
index 06ed0e4186fb82ffe3156f2b7f6a9775bac0bc25..d08bd6b0cc554d9b20242d786b29429662425d31 100644
--- a/configure.ac
+++ b/configure.ac
@@ -2,7 +2,7 @@
 m4_define([gnt_version_major], [2])
 m4_define([gnt_version_minor], [5])
 m4_define([gnt_version_revision], [0])
-m4_define([gnt_version_suffix], [~rc2])
+m4_define([gnt_version_suffix], [~rc4])
 m4_define([gnt_version_full],
           m4_format([%d.%d.%d%s],
                     gnt_version_major, gnt_version_minor,
diff --git a/doc/examples/ganeti.initd.in b/doc/examples/ganeti.initd.in
index cd137bdf781138957a1e20262f39da2e2918330e..94be6daefa1c77a26f2b74b82673a118d61f4af6 100644
--- a/doc/examples/ganeti.initd.in
+++ b/doc/examples/ganeti.initd.in
@@ -1,5 +1,5 @@
 #!/bin/sh
-# ganeti node daemon starter script
+# ganeti daemons init script
 # based on skeleton from Debian GNU/Linux
 ### BEGIN INIT INFO
 # Provides:          ganeti
diff --git a/doc/hooks.rst b/doc/hooks.rst
index bd3b8d56ea706621684dfc2548375ba5e02d30e1..72b734384b58e09d32b2f2cc68781bbe19ae7237 100644
--- a/doc/hooks.rst
+++ b/doc/hooks.rst
@@ -98,7 +98,7 @@ The scripts will be run as follows:
 
 - stdout and stderr are directed to files
 
-- PATH is reset to ``/sbin:/bin:/usr/sbin:/usr/bin``
+- PATH is reset to :pyeval:`constants.HOOKS_PATH`
 
 - the environment is cleared, and only ganeti-specific variables will
   be left
diff --git a/doc/rapi.rst b/doc/rapi.rst
index b2830e5c896b587dc19fefe323d41151796c6a3d..c80a30931069e6e61fd7d7b7a22164ca07e3232b 100644
--- a/doc/rapi.rst
+++ b/doc/rapi.rst
@@ -1298,6 +1298,10 @@ The query arguments used up to and including Ganeti 2.4 are deprecated
 and should no longer be used. The new request format can be detected by
 the presence of the :pyeval:`rlib2._NODE_MIGRATE_REQV1` feature string.
 
+Job result:
+
+.. opcode_result:: OP_NODE_MIGRATE
+
 
 ``/2/nodes/[node_name]/role``
 +++++++++++++++++++++++++++++
diff --git a/htools/Ganeti/HTools/Loader.hs b/htools/Ganeti/HTools/Loader.hs
index ff85ef87c6df25d2125b57a1e08d8e61252297e8..875436660d02465197b2eddcefda25e8b74201a6 100644
--- a/htools/Ganeti/HTools/Loader.hs
+++ b/htools/Ganeti/HTools/Loader.hs
@@ -315,11 +315,14 @@ checkData nl il =
                              - nodeIdsk node il
                  newn = Node.setFmem (Node.setXmem node delta_mem)
                         (Node.fMem node - adj_mem)
-                 umsg1 = [printf "node %s is missing %d MB ram \
-                                 \and %d GB disk"
-                                 nname delta_mem (delta_dsk `div` 1024) |
-                                 delta_mem > 512 || delta_dsk > 1024]::[String]
-             in (msgs ++ umsg1, newn)
+                 umsg1 =
+                   if delta_mem > 512 || delta_dsk > 1024
+                      then (printf "node %s is missing %d MB ram \
+                                   \and %d GB disk"
+                                   nname delta_mem (delta_dsk `div` 1024)):
+                           msgs
+                      else msgs
+             in (umsg1, newn)
         ) [] nl
 
 -- | Compute the amount of memory used by primary instances on a node.
diff --git a/htools/Ganeti/HTools/Program/Hbal.hs b/htools/Ganeti/HTools/Program/Hbal.hs
index b881033283c7486f3c8a4faf7de1e2690e4b9fb5..9dc5ad187324b5ae497718fc30edbd6abd93b51a 100644
--- a/htools/Ganeti/HTools/Program/Hbal.hs
+++ b/htools/Ganeti/HTools/Program/Hbal.hs
@@ -280,10 +280,9 @@ selectGroup opts gl nlf ilf = do
       Just grp ->
           case lookup (Group.idx grp) ngroups of
             Nothing -> do
-              -- TODO: while this is unlikely to happen, log here the
-              -- actual group data to help debugging
-              hPutStrLn stderr "Internal failure, missing group idx"
-              exitWith $ ExitFailure 1
+              -- This will only happen if there are no nodes assigned
+              -- to this group
+              return (Group.name grp, (Container.empty, Container.empty))
             Just cdata -> return (Group.name grp, cdata)
 
 -- | Do a few checks on the cluster data.
diff --git a/lib/client/gnt_cluster.py b/lib/client/gnt_cluster.py
index fd68fdf1e6b9c70be9df84c3a9db139b97fdbef1..3de05c2a933c75521b35e1756bb775a4503ce246 100644
--- a/lib/client/gnt_cluster.py
+++ b/lib/client/gnt_cluster.py
@@ -1400,7 +1400,7 @@ commands = {
     "", "Does a check on the cluster disk status"),
   "repair-disk-sizes": (
     RepairDiskSizes, ARGS_MANY_INSTANCES, [DRY_RUN_OPT, PRIORITY_OPT],
-    "", "Updates mismatches in recorded disk sizes"),
+    "[instance...]", "Updates mismatches in recorded disk sizes"),
   "master-failover": (
     MasterFailover, ARGS_NONE, [NOVOTING_OPT],
     "", "Makes the current node the master"),
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index dd6217602dfb4b3b8d3bb1210e2563128c7cc24f..fa1b3d133fc9a5830b15b053ce203c578a4f651a 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -3243,7 +3243,10 @@ class LUClusterRepairDiskSizes(NoHooksLU):
         locking.LEVEL_NODE_RES: locking.ALL_SET,
         locking.LEVEL_INSTANCE: locking.ALL_SET,
         }
-    self.share_locks = _ShareAll()
+    self.share_locks = {
+      locking.LEVEL_NODE: 1,
+      locking.LEVEL_INSTANCE: 0,
+      }
 
   def DeclareLocks(self, level):
     if level == locking.LEVEL_NODE_RES and self.wanted_names is not None:
@@ -6674,7 +6677,7 @@ class LUInstanceRename(LogicalUnit):
     new_name = self.op.new_name
     if self.op.name_check:
       hostname = netutils.GetHostname(name=new_name)
-      if hostname != new_name:
+      if hostname.name != new_name:
         self.LogInfo("Resolved given name '%s' to '%s'", new_name,
                      hostname.name)
       if not utils.MatchNameComponent(self.op.new_name, [hostname.name]):
@@ -8284,6 +8287,11 @@ def _RemoveDisks(lu, instance, target_node=None):
                       " continuing anyway: %s", device.iv_name, node, msg)
         all_result = False
 
+    # if this is a DRBD disk, return its port to the pool
+    if device.dev_type in constants.LDS_DRBD:
+      tcp_port = device.logical_id[2]
+      lu.cfg.AddTcpUdpPort(tcp_port)
+
   if instance.disk_template == constants.DT_FILE:
     file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
     if target_node:
@@ -9102,6 +9110,11 @@ class LUInstanceCreate(LogicalUnit):
     if self.op.iallocator is not None:
       self._RunAllocator()
 
+    # Release all unneeded node locks
+    _ReleaseLocks(self, locking.LEVEL_NODE,
+                  keep=filter(None, [self.op.pnode, self.op.snode,
+                                     self.op.src_node]))
+
     #### node related checks
 
     # check primary node
@@ -10658,9 +10671,10 @@ def _LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
   (moved, failed, jobs) = alloc_result
 
   if failed:
-    lu.LogWarning("Unable to evacuate instances %s",
-                  utils.CommaJoin("%s (%s)" % (name, reason)
-                                  for (name, reason) in failed))
+    failreason = utils.CommaJoin("%s (%s)" % (name, reason)
+                                 for (name, reason) in failed)
+    lu.LogWarning("Unable to evacuate instances %s", failreason)
+    raise errors.OpExecError("Unable to evacuate instances %s" % failreason)
 
   if moved:
     lu.LogInfo("Instances to be moved: %s",
@@ -11566,6 +11580,11 @@ class LUInstanceSetParams(LogicalUnit):
         self.LogWarning("Could not remove metadata for disk %d on node %s,"
                         " continuing anyway: %s", idx, pnode, msg)
 
+    # these are DRBD disks; return their ports to the pool
+    for disk in old_disks:
+      tcp_port = disk.logical_id[2]
+      self.cfg.AddTcpUdpPort(tcp_port)
+
     # Node resource locks will be released by caller
 
   def Exec(self, feedback_fn):
@@ -11598,6 +11617,11 @@ class LUInstanceSetParams(LogicalUnit):
             self.LogWarning("Could not remove disk/%d on node %s: %s,"
                             " continuing anyway", device_idx, node, msg)
         result.append(("disk/%d" % device_idx, "remove"))
+
+        # if this is a DRBD disk, return its port to the pool
+        if device.dev_type in constants.LDS_DRBD:
+          tcp_port = device.logical_id[2]
+          self.cfg.AddTcpUdpPort(tcp_port)
       elif disk_op == constants.DDM_ADD:
         # add a new disk
         if instance.disk_template in (constants.DT_FILE,
diff --git a/lib/compat.py b/lib/compat.py
index 53ee1fb801d5e96fbfc1353ada0def0ba7cd0818..6515af1a0f00b62d062a17fc73533285e6134046 100644
--- a/lib/compat.py
+++ b/lib/compat.py
@@ -45,11 +45,10 @@ except ImportError:
 # modules (hmac, for example) which have changed their behavior as well from
 # one version to the other.
 try:
-  # pylint: disable=F0401
+  # Yes, these don't always exist, that's why we're testing for them
   # Yes, we're not using the imports in this module.
-  # pylint: disable=W0611
-  from hashlib import md5 as md5_hash
-  from hashlib import sha1 as sha1_hash
+  from hashlib import md5 as md5_hash # pylint: disable=W0611,E0611,F0401
+  from hashlib import sha1 as sha1_hash # pylint: disable=W0611,E0611,F0401
   # this additional version is needed for compatibility with the hmac module
   sha1 = sha1_hash
 except ImportError:
diff --git a/lib/config.py b/lib/config.py
index 53ff25755658ced2d2a5b33290fd7a4a5bad60b6..d5ca4196ba74753bc999f3d4f3e9808e5fdba85e 100644
--- a/lib/config.py
+++ b/lib/config.py
@@ -1209,6 +1209,14 @@ class ConfigWriter:
     """
     if instance_name not in self._config_data.instances:
       raise errors.ConfigurationError("Unknown instance '%s'" % instance_name)
+
+    # If a network port has been allocated to the instance,
+    # return it to the pool of free ports.
+    inst = self._config_data.instances[instance_name]
+    network_port = getattr(inst, "network_port", None)
+    if network_port is not None:
+      self._config_data.cluster.tcpudp_port_pool.add(network_port)
+
     del self._config_data.instances[instance_name]
     self._config_data.cluster.serial_no += 1
     self._WriteConfig()
diff --git a/lib/constants.py b/lib/constants.py
index 48ac0b51c4e6976e25c3db7bd58d5b05c2c9b663..7a19cfe9cc3491382255c3d938a51f9eeaaef918 100644
--- a/lib/constants.py
+++ b/lib/constants.py
@@ -366,6 +366,7 @@ HOOKS_PHASE_POST = "post"
 HOOKS_NAME_CFGUPDATE = "config-update"
 HOOKS_NAME_WATCHER = "watcher"
 HOOKS_VERSION = 2
+HOOKS_PATH = "/sbin:/bin:/usr/sbin:/usr/bin"
 
 # hooks subject type (what object type does the LU deal with)
 HTYPE_CLUSTER = "CLUSTER"
diff --git a/lib/jqueue.py b/lib/jqueue.py
index 12e9094086f7d7dfde966d61dea69b6b978045ea..e21132e616aeb08eab70742657dc2b112165684f 100644
--- a/lib/jqueue.py
+++ b/lib/jqueue.py
@@ -1827,7 +1827,8 @@ class JobQueue(object):
     @return: a string representing the job identifier.
 
     """
-    assert count > 0
+    assert ht.TPositiveInt(count)
+
     # New number
     serial = self._last_serial + count
 
diff --git a/lib/mcpu.py b/lib/mcpu.py
index bc6248202017db7825666edb527f1f99d37a0b86..95f04e066af6e5a33f2ebbca2f066900d5123638 100644
--- a/lib/mcpu.py
+++ b/lib/mcpu.py
@@ -560,7 +560,7 @@ class HooksMaster(object):
 
     """
     env = {
-      "PATH": "/sbin:/bin:/usr/sbin:/usr/bin",
+      "PATH": constants.HOOKS_PATH,
       "GANETI_HOOKS_VERSION": constants.HOOKS_VERSION,
       "GANETI_OP_CODE": self.opcode,
       "GANETI_DATA_DIR": constants.DATA_DIR,
diff --git a/lib/opcodes.py b/lib/opcodes.py
index 986fb74a18596a4dae6ff1df4729ac5104699c28..454a3780def6026b7765256d641c608873dafefd 100644
--- a/lib/opcodes.py
+++ b/lib/opcodes.py
@@ -1025,6 +1025,7 @@ class OpNodeMigrate(OpCode):
     ("iallocator", None, ht.TMaybeString,
      "Iallocator for deciding the target node for shared-storage instances"),
     ]
+  OP_RESULT = TJobIdListOnly
 
 
 class OpNodeEvacuate(OpCode):
diff --git a/test/ganeti.cmdlib_unittest.py b/test/ganeti.cmdlib_unittest.py
index 33debe5e7f253f2df60d70c87c169caf6d365189..e69faf3d67c443e3701034451b85e489354ecab3 100755
--- a/test/ganeti.cmdlib_unittest.py
+++ b/test/ganeti.cmdlib_unittest.py
@@ -28,6 +28,7 @@ import time
 import tempfile
 import shutil
 import operator
+import itertools
 
 from ganeti import constants
 from ganeti import mcpu
@@ -380,5 +381,71 @@ class TestClusterVerifyFiles(unittest.TestCase):
       ]))
 
 
+class _FakeLU:
+  def __init__(self):
+    self.warning_log = []
+    self.info_log = []
+
+  def LogWarning(self, text, *args):
+    self.warning_log.append((text, args))
+
+  def LogInfo(self, text, *args):
+    self.info_log.append((text, args))
+
+
+class TestLoadNodeEvacResult(unittest.TestCase):
+  def testSuccess(self):
+    for moved in [[], [
+      ("inst20153.example.com", "grp2", ["nodeA4509", "nodeB2912"]),
+      ]]:
+      for early_release in [False, True]:
+        for use_nodes in [False, True]:
+          jobs = [
+            [opcodes.OpInstanceReplaceDisks().__getstate__()],
+            [opcodes.OpInstanceMigrate().__getstate__()],
+            ]
+
+          alloc_result = (moved, [], jobs)
+          assert cmdlib.IAllocator._NEVAC_RESULT(alloc_result)
+
+          lu = _FakeLU()
+          result = cmdlib._LoadNodeEvacResult(lu, alloc_result,
+                                              early_release, use_nodes)
+
+          if moved:
+            (_, (info_args, )) = lu.info_log.pop(0)
+            for (instname, instgroup, instnodes) in moved:
+              self.assertTrue(instname in info_args)
+              if use_nodes:
+                for i in instnodes:
+                  self.assertTrue(i in info_args)
+              else:
+                self.assertTrue(instgroup in info_args)
+
+          self.assertFalse(lu.info_log)
+          self.assertFalse(lu.warning_log)
+
+          for op in itertools.chain(*result):
+            if hasattr(op.__class__, "early_release"):
+              self.assertEqual(op.early_release, early_release)
+            else:
+              self.assertFalse(hasattr(op, "early_release"))
+
+  def testFailed(self):
+    alloc_result = ([], [
+      ("inst5191.example.com", "errormsg21178"),
+      ], [])
+    assert cmdlib.IAllocator._NEVAC_RESULT(alloc_result)
+
+    lu = _FakeLU()
+    self.assertRaises(errors.OpExecError, cmdlib._LoadNodeEvacResult,
+                      lu, alloc_result, False, False)
+    self.assertFalse(lu.info_log)
+    (_, (args, )) = lu.warning_log.pop(0)
+    self.assertTrue("inst5191.example.com" in args)
+    self.assertTrue("errormsg21178" in args)
+    self.assertFalse(lu.warning_log)
+
+
 if __name__ == "__main__":
   testutils.GanetiTestProgram()