diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index dea29e4b9dba11a3f81d38b345a73ae9da2ab480..f5566775baaca189ed08e8768a3ef13aa6606d5d 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -2202,14 +2202,24 @@ class LUAddNode(LogicalUnit):
                                    " based ping to noded port")
 
     cp_size = self.cfg.GetClusterInfo().candidate_pool_size
-    mc_now, _ = self.cfg.GetMasterCandidateStats()
-    master_candidate = mc_now < cp_size
+    if self.op.readd:
+      exceptions = [node]
+    else:
+      exceptions = []
+    mc_now, mc_max = self.cfg.GetMasterCandidateStats(exceptions)
+    # the new node will increase mc_max by one, so:
+    mc_max = min(mc_max + 1, cp_size)
+    self.master_candidate = mc_now < mc_max
 
-    self.new_node = objects.Node(name=node,
-                                 primary_ip=primary_ip,
-                                 secondary_ip=secondary_ip,
-                                 master_candidate=master_candidate,
-                                 offline=False, drained=False)
+    if self.op.readd:
+      self.new_node = self.cfg.GetNodeInfo(node)
+      assert self.new_node is not None, "Can't retrieve locked node %s" % node
+    else:
+      self.new_node = objects.Node(name=node,
+                                   primary_ip=primary_ip,
+                                   secondary_ip=secondary_ip,
+                                   master_candidate=self.master_candidate,
+                                   offline=False, drained=False)
 
   def Exec(self, feedback_fn):
     """Adds the new node to the cluster.
@@ -2218,6 +2228,20 @@ class LUAddNode(LogicalUnit):
     new_node = self.new_node
     node = new_node.name
 
+    # for re-adds, reset the offline/drained/master-candidate flags;
+    # we need to reset them here, otherwise the offline flag would
+    # prevent RPC calls later in the procedure; this also means that
+    # if the re-add fails, we are left with a non-offlined, broken node
+    if self.op.readd:
+      new_node.drained = new_node.offline = False
+      self.LogInfo("Readding a node, the offline/drained flags were reset")
+      # if we demote the node, we do cleanup later in the procedure
+      new_node.master_candidate = self.master_candidate
+
+    # notify the user about a possible master candidate promotion
+    if new_node.master_candidate:
+      self.LogInfo("Node will be a master candidate")
+
     # check connectivity
     result = self.rpc.call_version([node])[node]
     result.Raise()
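
The ordering in the hunk above matters: the offline flag has to be cleared
before the first RPC to the node (the call_version check just below), since
a set offline flag would block those calls. A minimal sketch of the reset
step, with reset_readd_flags and log_info as hypothetical stand-ins
(log_info plays the role of self.LogInfo):

def reset_readd_flags(node_obj, master_candidate, log_info):
    # Clear both flags up front; a set offline flag would break every
    # RPC made to the node later in Exec.
    node_obj.drained = node_obj.offline = False
    log_info("Readding a node, the offline/drained flags were reset")
    # A demotion (master_candidate=False) is cleaned up on the node
    # itself at the end of Exec, via call_node_demote_from_mc.
    node_obj.master_candidate = master_candidate
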
@@ -2313,6 +2337,15 @@ class LUAddNode(LogicalUnit):
 
     if self.op.readd:
       self.context.ReaddNode(new_node)
+      # make sure we redistribute the config
+      self.cfg.Update(new_node)
+      # and make sure the new node will not have old files around
+      if not new_node.master_candidate:
+        result = self.rpc.call_node_demote_from_mc(new_node.name)
+        msg = result.RemoteFailMsg()
+        if msg:
+          self.LogWarning("Node failed to demote itself from master"
+                          " candidate status: %s" % msg)
     else:
       self.context.AddNode(new_node)
 
diff --git a/scripts/gnt-node b/scripts/gnt-node
index 499fcd040374c16111fe9848d5ed9bfe00d62931..bb244d58690fa3afb7fc7546ca2bda0ea8309dd7 100755
--- a/scripts/gnt-node
+++ b/scripts/gnt-node
@@ -100,18 +100,7 @@ def AddNode(opts, args):
   output = cl.QueryConfigValues(['cluster_name'])
   cluster_name = output[0]
 
-  if readd:
-    # clear the offline and drain flags on the node
-    ToStdout("Resetting the 'offline' and 'drained' flags due to re-add")
-    op = opcodes.OpSetNodeParams(node_name=node, force=True,
-                                 offline=False, drained=False)
-
-    result = SubmitOpCode(op, cl=cl)
-    if result:
-      ToStdout("Modified:")
-      for param, data in result:
-        ToStdout(" - %-5s -> %s", param, data)
-  else:
+  if not readd:
     ToStderr("-- WARNING -- \n"
              "Performing this operation is going to replace the ssh daemon"
              " keypair\n"