Commit 9ca8a7c5 authored by Agata Murawska

Rename admin_up to admin_state


Signed-off-by: Agata Murawska <agatamurawska@google.com>
Reviewed-by: Iustin Pop <iustin@google.com>
parent 34598551
@@ -111,7 +111,7 @@ instances
cluster, indexed by instance name; the contents are similar to the
instance definitions for the allocate mode, with the addition of:
-admin_up
+admin_state
if this instance is set to run (but not the actual status of the
instance)
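
For orientation, a rough sketch of what a single entry under "instances" in the iallocator input might look like after this rename; the values are made up, and the key set is an assumption that loosely mirrors the per-instance dict ("pir") built by IAllocator further down in this commit:

  # Illustrative only; values and the exact key set are assumptions.
  instances = {
    "instance1.example.com": {
      "tags": [],
      "admin_state": True,   # renamed from "admin_up": set-to-run flag
      "vcpus": 1,
      "memory": 128,
      "os": "debootstrap+default",
      "nics": [],
      "disks": [],
    },
  }
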
@@ -905,7 +905,7 @@ def _GetClusterDomainSecret():
def _CheckInstanceDown(lu, instance, reason):
"""Ensure that an instance is not running."""
-if instance.admin_up:
+if instance.admin_state:
raise errors.OpPrereqError("Instance %s is marked to be up, %s" %
(instance.name, reason), errors.ECODE_STATE)
@@ -1088,7 +1088,7 @@ def _BuildInstanceHookEnvByObject(lu, instance, override=None):
"primary_node": instance.primary_node,
"secondary_nodes": instance.secondary_nodes,
"os_type": instance.os,
"status": instance.admin_up,
"status": instance.admin_state,
"memory": bep[constants.BE_MEMORY],
"vcpus": bep[constants.BE_VCPUS],
"nics": _NICListToTuple(lu, instance.nics),
@@ -2033,7 +2033,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
_ErrorIf(test, constants.CV_EINSTANCEMISSINGDISK, instance,
"volume %s missing on node %s", volume, node)
-if instanceconfig.admin_up:
+if instanceconfig.admin_state:
pri_img = node_image[node_current]
test = instance not in pri_img.instances and not pri_img.offline
_ErrorIf(test, constants.CV_EINSTANCEDOWN, instance,
@@ -2049,11 +2049,11 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
# node here
snode = node_image[nname]
bad_snode = snode.ghost or snode.offline
-_ErrorIf(instanceconfig.admin_up and not success and not bad_snode,
+_ErrorIf(instanceconfig.admin_state and not success and not bad_snode,
constants.CV_EINSTANCEFAULTYDISK, instance,
"couldn't retrieve status for disk/%s on %s: %s",
idx, nname, bdev_status)
-_ErrorIf((instanceconfig.admin_up and success and
+_ErrorIf((instanceconfig.admin_state and success and
bdev_status.ldisk_status == constants.LDS_FAULTY),
constants.CV_EINSTANCEFAULTYDISK, instance,
"disk/%s on %s is faulty", idx, nname)
@@ -2262,7 +2262,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
node_drbd[minor] = (instance, False)
else:
instance = instanceinfo[instance]
-node_drbd[minor] = (instance.name, instance.admin_up)
+node_drbd[minor] = (instance.name, instance.admin_state)
# and now check them
used_minors = nresult.get(constants.NV_DRBDLIST, [])
@@ -2910,7 +2910,7 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
constants.CV_ENODERPC, pnode, "instance %s, connection to"
" primary node failed", instance)
-_ErrorIf(inst_config.admin_up and pnode_img.offline,
+_ErrorIf(inst_config.admin_state and pnode_img.offline,
constants.CV_EINSTANCEBADNODE, instance,
"instance is marked as running and lives on offline node %s",
inst_config.primary_node)
@@ -3165,7 +3165,7 @@ class LUGroupVerifyDisks(NoHooksLU):
nv_dict = _MapInstanceDisksToNodes([inst
for inst in self.instances.values()
-if inst.admin_up])
+if inst.admin_state])
if nv_dict:
nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
@@ -7061,7 +7061,7 @@ class LUInstanceMove(LogicalUnit):
_CheckNodeNotDrained(self, target_node)
_CheckNodeVmCapable(self, target_node)
-if instance.admin_up:
+if instance.admin_state:
# check memory requirements on the secondary node
_CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
instance.name, bep[constants.BE_MEMORY],
@@ -7155,7 +7155,7 @@ class LUInstanceMove(LogicalUnit):
_RemoveDisks(self, instance, target_node=source_node)
# Only start the instance if it's marked as up
-if instance.admin_up:
+if instance.admin_state:
self.LogInfo("Starting instance %s on node %s",
instance.name, target_node)
@@ -7293,7 +7293,7 @@ class TLMigrateInstance(Tasklet):
assert instance is not None
self.instance = instance
-if (not self.cleanup and not instance.admin_up and not self.failover and
+if (not self.cleanup and not instance.admin_state and not self.failover and
self.fallback):
self.lu.LogInfo("Instance is marked down, fallback allowed, switching"
" to failover")
@@ -7355,7 +7355,7 @@ class TLMigrateInstance(Tasklet):
i_be = self.cfg.GetClusterInfo().FillBE(instance)
# check memory requirements on the secondary node
-if not self.failover or instance.admin_up:
+if not self.failover or instance.admin_state:
_CheckNodeFreeMemory(self.lu, target_node, "migrating instance %s" %
instance.name, i_be[constants.BE_MEMORY],
instance.hypervisor)
@@ -7772,7 +7772,7 @@ class TLMigrateInstance(Tasklet):
source_node = instance.primary_node
target_node = self.target_node
-if instance.admin_up:
+if instance.admin_state:
self.feedback_fn("* checking disk consistency between source and target")
for dev in instance.disks:
# for drbd, these are drbd over lvm
@@ -7815,7 +7815,7 @@ class TLMigrateInstance(Tasklet):
self.cfg.Update(instance, self.feedback_fn)
# Only start the instance if it's marked as up
-if instance.admin_up:
+if instance.admin_state:
self.feedback_fn("* activating the instance's disks on target node %s" %
target_node)
logging.info("Starting instance %s on node %s",
@@ -9217,7 +9217,7 @@ class LUInstanceCreate(LogicalUnit):
primary_node=pnode_name,
nics=self.nics, disks=disks,
disk_template=self.op.disk_template,
-admin_up=False,
+admin_state=False,
network_port=network_port,
beparams=self.op.beparams,
hvparams=self.op.hvparams,
@@ -9397,7 +9397,7 @@ class LUInstanceCreate(LogicalUnit):
assert not self.owned_locks(locking.LEVEL_NODE_RES)
if self.op.start:
-iobj.admin_up = True
+iobj.admin_state = True
self.cfg.Update(iobj, feedback_fn)
logging.info("Starting instance %s on node %s", instance, pnode_name)
feedback_fn("* starting instance...")
@@ -9445,7 +9445,7 @@ class LUInstanceConsole(NoHooksLU):
node_insts.Raise("Can't get node information from %s" % node)
if instance.name not in node_insts.payload:
-if instance.admin_up:
+if instance.admin_state:
state = constants.INSTST_ERRORDOWN
else:
state = constants.INSTST_ADMINDOWN
@@ -9895,7 +9895,7 @@ class TLReplaceDisks(Tasklet):
feedback_fn("Replacing disk(s) %s for %s" %
(utils.CommaJoin(self.disks), self.instance.name))
-activate_disks = (not self.instance.admin_up)
+activate_disks = (not self.instance.admin_state)
# Activate the instance disks if we're replacing them on a down instance
if activate_disks:
@@ -10396,7 +10396,7 @@ class LURepairNodeStorage(NoHooksLU):
"""
# Check whether any instance on this node has faulty disks
for inst in _GetNodeInstances(self.cfg, self.op.node_name):
-if not inst.admin_up:
+if not inst.admin_state:
continue
check_nodes = set(inst.all_nodes)
check_nodes.discard(self.op.node_name)
@@ -10758,9 +10758,9 @@ class LUInstanceGrowDisk(LogicalUnit):
if disk_abort:
self.proc.LogWarning("Disk sync-ing has not returned a good"
" status; please check the instance")
-if not instance.admin_up:
+if not instance.admin_state:
_SafeShutdownInstanceDisks(self, instance, disks=[disk])
-elif not instance.admin_up:
+elif not instance.admin_state:
self.proc.LogWarning("Not shutting down the disk even if the instance is"
" not supposed to be running because no wait for"
" sync mode was requested")
@@ -10901,7 +10901,7 @@ class LUInstanceQueryData(NoHooksLU):
else:
remote_state = "down"
-if instance.admin_up:
+if instance.admin_state:
config_state = "up"
else:
config_state = "down"
@@ -12000,7 +12000,7 @@ class LUBackupExport(LogicalUnit):
"Cannot retrieve locked instance %s" % self.op.instance_name
_CheckNodeOnline(self, self.instance.primary_node)
-if (self.op.remove_instance and self.instance.admin_up and
+if (self.op.remove_instance and self.instance.admin_state and
not self.op.shutdown):
raise errors.OpPrereqError("Can not remove instance without shutting it"
" down before")
@@ -12130,7 +12130,7 @@ class LUBackupExport(LogicalUnit):
for disk in instance.disks:
self.cfg.SetDiskID(disk, src_node)
-activate_disks = (not instance.admin_up)
+activate_disks = (not instance.admin_state)
if activate_disks:
# Activate the instance disks if we'exporting a stopped instance
@@ -12143,7 +12143,7 @@ class LUBackupExport(LogicalUnit):
helper.CreateSnapshots()
try:
-if (self.op.shutdown and instance.admin_up and
+if (self.op.shutdown and instance.admin_state and
not self.op.remove_instance):
assert not activate_disks
feedback_fn("Starting instance %s" % instance.name)
@@ -13446,7 +13446,7 @@ class IAllocator(object):
i_mem_diff = beinfo[constants.BE_MEMORY] - i_used_mem
remote_info["memory_free"] -= max(0, i_mem_diff)
-if iinfo.admin_up:
+if iinfo.admin_state:
i_p_up_mem += beinfo[constants.BE_MEMORY]
# compute memory used by instances
@@ -13486,7 +13486,7 @@ class IAllocator(object):
nic_data.append(nic_dict)
pir = {
"tags": list(iinfo.GetTags()),
"admin_up": iinfo.admin_up,
"admin_state": iinfo.admin_state,
"vcpus": beinfo[constants.BE_VCPUS],
"memory": beinfo[constants.BE_MEMORY],
"os": iinfo.os,
@@ -1182,8 +1182,8 @@ class ConfigWriter:
raise errors.ConfigurationError("Unknown instance '%s'" %
instance_name)
instance = self._config_data.instances[instance_name]
-if instance.admin_up != status:
-instance.admin_up = status
+if instance.admin_state != status:
+instance.admin_state = status
instance.serial_no += 1
instance.mtime = time.time()
self._WriteConfig()
@@ -1271,6 +1271,16 @@ INSTST_ALL = frozenset([
INSTST_ERRORDOWN,
])
+# Admin states
+ADMINST_UP = "up"
+ADMINST_DOWN = "down"
+ADMINST_OFFLINE = "offline"
+ADMINST_ALL = frozenset([
+ADMINST_UP,
+ADMINST_DOWN,
+ADMINST_OFFLINE,
+])
# Node roles
NR_REGULAR = "R"
NR_MASTER = "M"
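
For context, a minimal usage sketch of the new constants; the helper below is hypothetical and not part of this commit, it only illustrates how a tri-state admin value can still answer the old boolean "should this instance be running?" question:

  # Hypothetical helper, not in the Ganeti tree; constant values copied from above.
  ADMINST_UP = "up"
  ADMINST_DOWN = "down"
  ADMINST_OFFLINE = "offline"
  ADMINST_ALL = frozenset([ADMINST_UP, ADMINST_DOWN, ADMINST_OFFLINE])

  def instance_should_run(admin_state):
    """Validate an admin state and reduce it to the old boolean meaning."""
    if admin_state not in ADMINST_ALL:
      raise ValueError("Unknown admin state %r" % admin_state)
    return admin_state == ADMINST_UP
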
@@ -744,7 +744,7 @@ class Instance(TaggableObject):
"hvparams",
"beparams",
"osparams",
"admin_up",
"admin_state",
"nics",
"disks",
"disk_template",
@@ -884,6 +884,13 @@ class Instance(TaggableObject):
"""Custom function for instances.
"""
if "admin_state" not in val:
if val.get("admin_up", False):
val["admin_state"] = constants.ADMINST_UP
else:
val["admin_state"] = constants.ADMINST_DOWN
if "admin_up" in val:
del val["admin_up"]
obj = super(Instance, cls).FromDict(val)
obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC)
obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk)
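
The compatibility shim above can be read on its own; here is a standalone restatement of the rule it applies to legacy dicts (a sketch, not the real objects.Instance code), with the constant values copied from the constants.py hunk earlier in this commit:

  # Sketch of the legacy-key upgrade rule; "upgrade_admin_state" is an
  # illustrative name and does not exist in the tree.
  ADMINST_UP = "up"
  ADMINST_DOWN = "down"

  def upgrade_admin_state(val):
    """Map a legacy boolean "admin_up" entry to the new "admin_state" key."""
    if "admin_state" not in val:
      if val.get("admin_up", False):
        val["admin_state"] = ADMINST_UP
      else:
        val["admin_state"] = ADMINST_DOWN
    if "admin_up" in val:
      del val["admin_up"]
    return val

  assert upgrade_admin_state({"admin_up": True}) == {"admin_state": "up"}
  assert upgrade_admin_state({}) == {"admin_state": "down"}
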
@@ -1348,12 +1348,12 @@ def _GetInstStatus(ctx, inst):
if bool(ctx.live_data.get(inst.name)):
if inst.name in ctx.wrongnode_inst:
return constants.INSTST_WRONGNODE
-elif inst.admin_up:
+elif inst.admin_state:
return constants.INSTST_RUNNING
else:
return constants.INSTST_ERRORUP
-if inst.admin_up:
+if inst.admin_state:
return constants.INSTST_ERRORDOWN
return constants.INSTST_ADMINDOWN
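
To make the branching above easier to scan, a self-contained restatement of the same decision logic; it returns the constant names as plain strings and is for illustration only:

  # Illustration only: mirrors _GetInstStatus as changed above, with the
  # constants replaced by their names so the sketch stays self-contained.
  def expected_instance_status(has_live_data, on_wrong_node, admin_state):
    if has_live_data:
      if on_wrong_node:
        return "INSTST_WRONGNODE"
      elif admin_state:
        return "INSTST_RUNNING"
      else:
        return "INSTST_ERRORUP"
    if admin_state:
      return "INSTST_ERRORDOWN"
    return "INSTST_ADMINDOWN"
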
@@ -1778,7 +1778,7 @@ def _BuildInstanceFields():
(_MakeField("admin_state", "Autostart", QFT_BOOL,
"Desired state of instance (if set, the instance should be"
" up)"),
-IQ_CONFIG, 0, _GetItemAttr("admin_up")),
+IQ_CONFIG, 0, _GetItemAttr("admin_state")),
(_MakeField("tags", "Tags", QFT_OTHER, "Tags"), IQ_CONFIG, 0,
lambda ctx, inst: list(inst.GetTags())),
(_MakeField("console", "Console", QFT_OTHER,
@@ -644,7 +644,7 @@ class TestInstanceQuery(unittest.TestCase):
objects.Instance(name="inst1", hvparams={}, beparams={}, nics=[],
uuid="f90eccb3-e227-4e3c-bf2a-94a21ca8f9cd",
ctime=1291244000, mtime=1291244400, serial_no=30,
-admin_up=True, hypervisor=constants.HT_XEN_PVM, os="linux1",
+admin_state=True, hypervisor=constants.HT_XEN_PVM, os="linux1",
primary_node="node1",
disk_template=constants.DT_PLAIN,
disks=[],
@@ -652,7 +652,7 @@ class TestInstanceQuery(unittest.TestCase):
objects.Instance(name="inst2", hvparams={}, nics=[],
uuid="73a0f8a7-068c-4630-ada2-c3440015ab1a",
ctime=1291211000, mtime=1291211077, serial_no=1,
-admin_up=True, hypervisor=constants.HT_XEN_HVM, os="deb99",
+admin_state=True, hypervisor=constants.HT_XEN_HVM, os="deb99",
primary_node="node5",
disk_template=constants.DT_DISKLESS,
disks=[],
@@ -663,7 +663,7 @@ class TestInstanceQuery(unittest.TestCase):
objects.Instance(name="inst3", hvparams={}, beparams={},
uuid="11ec8dff-fb61-4850-bfe0-baa1803ff280",
ctime=1291011000, mtime=1291013000, serial_no=1923,
-admin_up=False, hypervisor=constants.HT_KVM, os="busybox",
+admin_state=False, hypervisor=constants.HT_KVM, os="busybox",
primary_node="node6",
disk_template=constants.DT_DRBD8,
disks=[],
@@ -678,7 +678,7 @@ class TestInstanceQuery(unittest.TestCase):
objects.Instance(name="inst4", hvparams={}, beparams={},
uuid="68dab168-3ef5-4c9d-b4d3-801e0672068c",
ctime=1291244390, mtime=1291244395, serial_no=25,
-admin_up=False, hypervisor=constants.HT_XEN_PVM, os="linux1",
+admin_state=False, hypervisor=constants.HT_XEN_PVM, os="linux1",
primary_node="nodeoff2",
disk_template=constants.DT_DRBD8,
disks=[],
@@ -702,7 +702,7 @@ class TestInstanceQuery(unittest.TestCase):
objects.Instance(name="inst5", hvparams={}, nics=[],
uuid="0e3dca12-5b42-4e24-98a2-415267545bd0",
ctime=1231211000, mtime=1261200000, serial_no=3,
-admin_up=True, hypervisor=constants.HT_XEN_HVM, os="deb99",
+admin_state=True, hypervisor=constants.HT_XEN_HVM, os="deb99",
primary_node="nodebad2",
disk_template=constants.DT_DISKLESS,
disks=[],
@@ -713,7 +713,7 @@ class TestInstanceQuery(unittest.TestCase):
objects.Instance(name="inst6", hvparams={}, nics=[],
uuid="72de6580-c8d5-4661-b902-38b5785bb8b3",
ctime=7513, mtime=11501, serial_no=13390,
-admin_up=False, hypervisor=constants.HT_XEN_HVM, os="deb99",
+admin_state=False, hypervisor=constants.HT_XEN_HVM, os="deb99",
primary_node="node7",
disk_template=constants.DT_DISKLESS,
disks=[],
@@ -726,7 +726,7 @@ class TestInstanceQuery(unittest.TestCase):
objects.Instance(name="inst7", hvparams={}, nics=[],
uuid="ceec5dc4-b729-4f42-ae28-69b3cd24920e",
ctime=None, mtime=None, serial_no=1947,
-admin_up=False, hypervisor=constants.HT_XEN_HVM, os="deb99",
+admin_state=False, hypervisor=constants.HT_XEN_HVM, os="deb99",
primary_node="node6",
disk_template=constants.DT_DISKLESS,
disks=[],
@@ -799,11 +799,11 @@ class TestInstanceQuery(unittest.TestCase):
elif inst.name in live_data:
if inst.name in wrongnode_inst:
exp_status = constants.INSTST_WRONGNODE
-elif inst.admin_up:
+elif inst.admin_state:
exp_status = constants.INSTST_RUNNING
else:
exp_status = constants.INSTST_ERRORUP
-elif inst.admin_up:
+elif inst.admin_state:
exp_status = constants.INSTST_ERRORDOWN
else:
exp_status = constants.INSTST_ADMINDOWN