Commit 4d32c211 authored by Guido Trotter

Add -s option to gnt-node modify



We can now change a node's secondary IP.
Signed-off-by: Guido Trotter <ultrotter@google.com>
Reviewed-by: Michael Hanselmann <hansmi@google.com>
parent 2317945a
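In practice the new flag adds one field to the node-modification opcode. Below is a minimal usage sketch, assuming only the OpSetNodeParams fields visible in the diff (node_name, secondary_ip); the node name and address are made up and job submission is left out:

# Hypothetical illustration only, roughly what
#   gnt-node modify -s 192.0.2.11 node3.example.com
# ends up building internally after this change.
from ganeti import opcodes

op = opcodes.OpSetNodeParams(node_name="node3.example.com",  # example node
                             secondary_ip="192.0.2.11")      # example new IP
# The CLI then submits this opcode as a job through its normal path.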
@@ -656,7 +656,7 @@ def SetNodeParams(opts, args):
   """
   all_changes = [opts.master_candidate, opts.drained, opts.offline,
-                 opts.master_capable, opts.vm_capable]
+                 opts.master_capable, opts.vm_capable, opts.secondary_ip]
   if all_changes.count(None) == len(all_changes):
     ToStderr("Please give at least one of the parameters.")
     return 1

@@ -667,6 +667,7 @@ def SetNodeParams(opts, args):
                                drained=opts.drained,
                                master_capable=opts.master_capable,
                                vm_capable=opts.vm_capable,
+                               secondary_ip=opts.secondary_ip,
                                force=opts.force,
                                auto_promote=opts.auto_promote)

@@ -720,7 +721,7 @@ commands = {
   'modify': (
     SetNodeParams, ARGS_ONE_NODE,
     [FORCE_OPT, SUBMIT_OPT, MC_OPT, DRAINED_OPT, OFFLINE_OPT,
-     CAPAB_MASTER_OPT, CAPAB_VM_OPT,
+     CAPAB_MASTER_OPT, CAPAB_VM_OPT, SECONDARY_IP_OPT,
      AUTO_PROMOTE_OPT, DRY_RUN_OPT, PRIORITY_OPT],
     "<node_name>", "Alters the parameters of a node"),
   'powercycle': (
@@ -650,6 +650,33 @@ def _CheckNodeHasOS(lu, node, os_name, force_variant):
   _CheckOSVariant(result.payload, os_name)


+def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
+  """Ensure that a node has the given secondary ip.
+
+  @type lu: L{LogicalUnit}
+  @param lu: the LU on behalf of which we make the check
+  @type node: string
+  @param node: the node to check
+  @type secondary_ip: string
+  @param secondary_ip: the ip to check
+  @type prereq: boolean
+  @param prereq: whether to throw a prerequisite or an execute error
+  @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
+  @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False
+
+  """
+  result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
+  result.Raise("Failure checking secondary ip on node %s" % node,
+               prereq=prereq, ecode=errors.ECODE_ENVIRON)
+  if not result.payload:
+    msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
+           " please fix and re-run this command" % secondary_ip)
+    if prereq:
+      raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
+    else:
+      raise errors.OpExecError(msg)
+
+
 def _RequireFileStorage():
   """Checks that file storage is enabled.
@@ -3832,7 +3859,7 @@ class LUAddNode(LogicalUnit):
     if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                             source=myself.secondary_ip):
       raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
-                                 " based ping to noded port",
+                                 " based ping to node daemon port",
                                  errors.ECODE_ENVIRON)

     if self.op.readd:
@@ -3904,14 +3931,8 @@ class LUAddNode(LogicalUnit):
       result.Raise("Can't update hosts file with new host data")

     if new_node.secondary_ip != new_node.primary_ip:
-      result = self.rpc.call_node_has_ip_address(new_node.name,
-                                                 new_node.secondary_ip)
-      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
-                   prereq=True, ecode=errors.ECODE_ENVIRON)
-      if not result.payload:
-        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
-                                 " you gave (%s). Please fix and re-run this"
-                                 " command." % new_node.secondary_ip)
+      _CheckNodeHasSecondaryIP(self, new_node.name, new_node.secondary_ip,
+                               False)

     node_verify_list = [self.cfg.GetMasterNode()]
     node_verify_param = {
@@ -3968,6 +3989,7 @@ class LUSetNodeParams(LogicalUnit):
     ("auto_promote", False, ht.TBool),
     ("master_capable", None, ht.TMaybeBool),
     ("vm_capable", None, ht.TMaybeBool),
+    ("secondary_ip", None, ht.TMaybeString),
     _PForce,
     ]
   REQ_BGL = False
@@ -3984,7 +4006,8 @@
   def CheckArguments(self):
     self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
     all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
-                self.op.master_capable, self.op.vm_capable]
+                self.op.master_capable, self.op.vm_capable,
+                self.op.secondary_ip]
     if all_mods.count(None) == len(all_mods):
       raise errors.OpPrereqError("Please pass at least one modification",
                                  errors.ECODE_INVAL)
@@ -3999,7 +4022,14 @@
                          self.op.drained == True or
                          self.op.master_capable == False)

+    if self.op.secondary_ip:
+      if not netutils.IP4Address.IsValid(self.op.secondary_ip):
+        raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
+                                   " address" % self.op.secondary_ip,
+                                   errors.ECODE_INVAL)
+
     self.lock_all = self.op.auto_promote and self.might_demote
+    self.lock_instances = self.op.secondary_ip is not None

   def ExpandNames(self):
     if self.lock_all:
@@ -4007,6 +4037,29 @@
     else:
       self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}

+    if self.lock_instances:
+      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
+
+  def DeclareLocks(self, level):
+    # If we have locked all instances, before waiting to lock nodes, release
+    # all the ones living on nodes unrelated to the current operation.
+    if level == locking.LEVEL_NODE and self.lock_instances:
+      instances_release = []
+      instances_keep = []
+      self.affected_instances = []
+      if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
+        for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
+          instance = self.context.cfg.GetInstanceInfo(instance_name)
+          i_mirrored = instance.disk_template in constants.DTS_NET_MIRROR
+          if i_mirrored and self.op.node_name in instance.all_nodes:
+            instances_keep.append(instance_name)
+            self.affected_instances.append(instance)
+          else:
+            instances_release.append(instance_name)
+        if instances_release:
+          self.context.glm.release(locking.LEVEL_INSTANCE, instances_release)
+          self.acquired_locks[locking.LEVEL_INSTANCE] = instances_keep
+
   def BuildHooksEnv(self):
     """Build hooks env.
@@ -4121,6 +4174,35 @@
                    " without using re-add. Please make sure the node"
                    " is healthy!")

+    if self.op.secondary_ip:
+      # Ok even without locking, because this can't be changed by any LU
+      master = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
+      master_singlehomed = master.secondary_ip == master.primary_ip
+      if master_singlehomed and self.op.secondary_ip:
+        raise errors.OpPrereqError("Cannot change the secondary ip on a single"
+                                   " homed cluster", errors.ECODE_INVAL)
+
+      if node.offline:
+        if self.affected_instances:
+          raise errors.OpPrereqError("Cannot change secondary ip: offline"
+                                     " node has instances (%s) configured"
+                                     " to use it" % self.affected_instances)
+      else:
+        # On online nodes, check that no instances are running, and that
+        # the node has the new ip and we can reach it.
+        for instance in self.affected_instances:
+          _CheckInstanceDown(self, instance, "cannot change secondary ip")
+
+        _CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
+        if master.name != node.name:
+          # check reachability from master secondary ip to new secondary ip
+          if not netutils.TcpPing(self.op.secondary_ip,
+                                  constants.DEFAULT_NODED_PORT,
+                                  source=master.secondary_ip):
+            raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
+                                       " based ping to node daemon port",
+                                       errors.ECODE_ENVIRON)
+
   def Exec(self, feedback_fn):
     """Modifies a node.
@@ -4154,6 +4236,10 @@
     if self.lock_all:
       _AdjustCandidatePool(self, [node.name])

+    if self.op.secondary_ip:
+      node.secondary_ip = self.op.secondary_ip
+      result.append(("secondary_ip", self.op.secondary_ip))
+
     # this will trigger configuration file update, if needed
     self.cfg.Update(node, feedback_fn)
@@ -437,6 +437,7 @@ class OpSetNodeParams(OpCode):
     "auto_promote",
     "master_capable",
     "vm_capable",
+    "secondary_ip",
     ]
@@ -637,6 +637,7 @@
         <arg>--offline=<option>yes|no</option></arg>
         <arg>--master-capable=<option>yes|no</option></arg>
         <arg>--vm-capable=<option>yes|no</option></arg>
+        <arg>-s <replaceable>secondary_ip</replaceable></arg>
         <arg>--auto-promote</arg>
         <arg choice="req"><replaceable>node</replaceable></arg>
       </cmdsynopsis>

@@ -671,6 +672,12 @@
       </screen>
     </para>

+    <para>
+      The <option>-s</option> option can be used to change the node's
+      secondary IP. No drbd instances can be running on the node while this
+      operation is taking place.
+    </para>
+
     <para>Example (setting the node back to online and master candidate):
       <screen>
 # gnt-node modify --offline=no --master-candidate=yes node1.example.com