Commit b6aaf437 authored by René Nussbaumer

gnt-instance: Adding instance policy to failover


Signed-off-by: René Nussbaumer <rn@google.com>
Reviewed-by: Michael Hanselmann <hansmi@google.com>
parent 0fb81174
@@ -192,6 +192,7 @@ __all__ = [
"YES_DOIT_OPT",
"DISK_STATE_OPT",
"HV_STATE_OPT",
"IGNORE_IPOLICY_OPT",
# Generic functions for CLI programs
"ConfirmOperation",
"GenericMain",
@@ -1334,6 +1335,10 @@ HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
" format hypervisor:option=value,..."),
type="identkeyval")
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
action="store_true", default=False,
help="Ignore instance policy violations")
#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]
......
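The option definition above is standard optparse-style plumbing: Ganeti's `cli_option` is built on optparse's `Option`, so the `dest`/`action`/`default` keywords behave as in plain optparse. A minimal standalone sketch (plain optparse, illustrative instance name) of how the flag surfaces on the parsed options object:

```python
# Sketch with plain optparse; Ganeti's cli_option accepts the same keywords.
import optparse

parser = optparse.OptionParser()
parser.add_option("--ignore-ipolicy", dest="ignore_ipolicy",
                  action="store_true", default=False,
                  help="Ignore instance policy violations")

(opts, args) = parser.parse_args(["--ignore-ipolicy", "instance1.example.com"])
assert opts.ignore_ipolicy        # True when the flag is given
(opts, args) = parser.parse_args(["instance1.example.com"])
assert not opts.ignore_ipolicy    # False by default
```

This is what lets `FailoverInstance` below read the value as `opts.ignore_ipolicy`.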
@@ -791,7 +791,8 @@ def FailoverInstance(opts, args):
ignore_consistency=opts.ignore_consistency,
shutdown_timeout=opts.shutdown_timeout,
iallocator=iallocator,
target_node=target_node)
target_node=target_node,
ignore_ipolicy=opts.ignore_ipolicy)
SubmitOrSend(op, opts, cl=cl)
return 0
@@ -1436,7 +1437,8 @@ commands = {
"failover": (
FailoverInstance, ARGS_ONE_INSTANCE,
[FORCE_OPT, IGNORE_CONSIST_OPT, SUBMIT_OPT, SHUTDOWN_TIMEOUT_OPT,
DRY_RUN_OPT, PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT],
DRY_RUN_OPT, PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT,
IGNORE_IPOLICY_OPT],
"[-f] <instance>", "Stops the instance, changes its primary node and"
" (if it was originally running) starts it on the new node"
" (the secondary for mirrored instances or any node"
......
@@ -7244,7 +7244,8 @@ class LUInstanceFailover(LogicalUnit):
cleanup=False,
failover=True,
ignore_consistency=ignore_consistency,
shutdown_timeout=shutdown_timeout)
shutdown_timeout=shutdown_timeout,
ignore_ipolicy=self.op.ignore_ipolicy)
self.tasklets = [self._migrater]
 
def DeclareLocks(self, level):
@@ -7644,6 +7645,8 @@ class TLMigrateInstance(Tasklet):
and target node
@type shutdown_timeout: int
@ivar shutdown_timeout: In case of failover timeout of the shutdown
@type ignore_ipolicy: bool
@ivar ignore_ipolicy: If true, we can ignore instance policy when migrating
 
"""
 
@@ -7654,7 +7657,8 @@
def __init__(self, lu, instance_name, cleanup=False,
failover=False, fallback=False,
ignore_consistency=False,
shutdown_timeout=constants.DEFAULT_SHUTDOWN_TIMEOUT):
shutdown_timeout=constants.DEFAULT_SHUTDOWN_TIMEOUT,
ignore_ipolicy=False):
"""Initializes this class.
 
"""
@@ -7668,6 +7672,7 @@
self.fallback = fallback
self.ignore_consistency = ignore_consistency
self.shutdown_timeout = shutdown_timeout
self.ignore_ipolicy = ignore_ipolicy
 
def CheckPrereq(self):
"""Check prerequisites.
@@ -7679,6 +7684,7 @@
instance = self.cfg.GetInstanceInfo(instance_name)
assert instance is not None
self.instance = instance
cluster = self.cfg.GetClusterInfo()
 
if (not self.cleanup and
not instance.admin_state == constants.ADMINST_UP and
@@ -7706,6 +7712,12 @@
# BuildHooksEnv
self.target_node = self.lu.op.target_node
 
# Check that the target node is correct in terms of instance policy
nodeinfo = self.cfg.GetNodeInfo(self.target_node)
ipolicy = _CalculateGroupIPolicy(cluster, nodeinfo.group)
_CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
ignore=self.ignore_ipolicy)
# self.target_node is already populated, either directly or by the
# iallocator run
target_node = self.target_node
@@ -7739,8 +7751,12 @@
" node can be passed)" %
(instance.disk_template, text),
errors.ECODE_INVAL)
nodeinfo = self.cfg.GetNodeInfo(target_node)
ipolicy = _CalculateGroupIPolicy(cluster, nodeinfo.group)
_CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
ignore=self.ignore_ipolicy)
 
i_be = self.cfg.GetClusterInfo().FillBE(instance)
i_be = cluster.FillBE(instance)
 
# check memory requirements on the secondary node
if not self.failover or instance.admin_state == constants.ADMINST_UP:
@@ -7796,8 +7812,7 @@
self.lu.op.live = None
elif self.lu.op.mode is None:
# read the default value from the hypervisor
i_hv = self.cfg.GetClusterInfo().FillHV(self.instance,
skip_globals=False)
i_hv = cluster.FillHV(self.instance, skip_globals=False)
self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]
 
self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
@@ -7809,6 +7824,7 @@
"""Run the allocator based on input opcode.
 
"""
# FIXME: add a self.ignore_ipolicy option
ial = IAllocator(self.cfg, self.rpc,
mode=constants.IALLOCATOR_MODE_RELOC,
name=self.instance_name,
......
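Both new prereq checks delegate to `_CheckTargetNodeIPolicy`, which is not part of this diff. A minimal sketch of the shape such a helper plausibly has, assuming a `_ComputeIPolicyInstanceViolation(ipolicy, instance)` helper that returns a list of human-readable violations; the body below is illustrative, not the project's actual code:

```python
# Illustrative sketch only. Assumes _ComputeIPolicyInstanceViolation returns
# an empty list when the instance fits the policy.
def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, ignore=False):
  violations = _ComputeIPolicyInstanceViolation(ipolicy, instance)
  if violations:
    msg = ("Instance does not meet target node group's (%s) instance policy:"
           " %s" % (node.group, ", ".join(violations)))
    if ignore:
      # With --ignore-ipolicy the violation is downgraded to a warning
      lu.LogWarning(msg)
    else:
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
```

The `ignore` keyword is what `self.ignore_ipolicy` feeds at the two call sites above, so `--ignore-ipolicy` turns a hard prerequisite failure into a warning.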
@@ -152,6 +152,10 @@ _PDiskParams = ("diskparams", None,
_PHvState = ("hv_state", None, ht.TMaybeDict, "Set hypervisor states")
_PDiskState = ("disk_state", None, ht.TMaybeDict, "Set disk states")
_PIgnoreIpolicy = ("ignore_ipolicy", False, ht.TBool,
"Whether to ignore ipolicy violations")
#: OP_ID conversion regular expression
_OPID_RE = re.compile("([a-z])([A-Z])")
@@ -1236,6 +1240,7 @@ class OpInstanceFailover(OpCode):
_PShutdownTimeout,
_PIgnoreConsistency,
_PMigrationTargetNode,
_PIgnoreIpolicy,
("iallocator", None, ht.TMaybeString,
"Iallocator for deciding the target node for shared-storage instances"),
]
......
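With `_PIgnoreIpolicy` added to the opcode's parameter list, `ignore_ipolicy` becomes an ordinary opcode field: `ht.TBool` validation, plus a declared `False` default that opcode validation fills in when the field is omitted. A small hedged example (real `ganeti.opcodes` import path; values illustrative):

```python
from ganeti import opcodes

# Explicitly set, as FailoverInstance now does on the client side:
op = opcodes.OpInstanceFailover(instance_name="instance1.example.com",
                                ignore_ipolicy=True)

# Omitted: validating with set_defaults fills in the declared default (False).
op = opcodes.OpInstanceFailover(instance_name="instance1.example.com")
op.Validate(set_defaults=True)
assert op.ignore_ipolicy is False
```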
@@ -1380,7 +1380,7 @@ FAILOVER
^^^^^^^^
**failover** [-f] [--ignore-consistency] [--shutdown-timeout=*N*]
[--submit] {*instance*}
[--submit] [--ignore-ipolicy] {*instance*}
Failover will stop the instance (if running), change its primary node,
and if it was originally running it will start it again (on the new
@@ -1406,6 +1406,9 @@ The ``--submit`` option is used to send the job to the master daemon
but not wait for its completion. The job ID will be shown so that it
can be examined via **gnt-job info**.
If ``--ignore-ipolicy`` is given, any instance policy violations occurring
during this operation are ignored.
Example::
# gnt-instance failover instance1.example.com
......
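In practice the new flag simply composes with the existing synopsis, e.g. ``gnt-instance failover --ignore-ipolicy instance1.example.com`` to fail an instance over even when it violates the target group's instance policy (illustrative invocation; only the flag itself is defined by this change).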