diff --git a/lib/client/gnt_instance.py b/lib/client/gnt_instance.py index 180c0268b0880ec733bbf748f85ebd71893c8c7b..96eaef52b31f8cf94c57760f0444892bd2693dac 100644 --- a/lib/client/gnt_instance.py +++ b/lib/client/gnt_instance.py @@ -212,7 +212,7 @@ def ListInstances(opts, args): fmtoverride = dict.fromkeys(["tags", "disk.sizes", "nic.macs", "nic.ips", "nic.modes", "nic.links", "nic.bridges", - "snodes"], + "snodes", "snodes.group", "snodes.group.uuid"], (lambda value: ",".join(str(item) for item in value), False)) diff --git a/lib/cmdlib.py b/lib/cmdlib.py index 6b880e73fd14b4edcc71fa01be94cc4a5c302f06..9955e4c97b5bd0567f404c06e1577b9f51c8d920 100644 --- a/lib/cmdlib.py +++ b/lib/cmdlib.py @@ -4492,8 +4492,7 @@ class _InstanceQuery(_QueryBase): def ExpandNames(self, lu): lu.needed_locks = {} - lu.share_locks[locking.LEVEL_INSTANCE] = 1 - lu.share_locks[locking.LEVEL_NODE] = 1 + lu.share_locks = _ShareAll() if self.names: self.wanted = _GetWantedInstances(lu, self.names) @@ -4504,17 +4503,54 @@ class _InstanceQuery(_QueryBase): query.IQ_LIVE in self.requested_data) if self.do_locking: lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted + lu.needed_locks[locking.LEVEL_NODEGROUP] = [] lu.needed_locks[locking.LEVEL_NODE] = [] lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE + self.do_grouplocks = (self.do_locking and + query.IQ_NODES in self.requested_data) + def DeclareLocks(self, lu, level): - if level == locking.LEVEL_NODE and self.do_locking: - lu._LockInstancesNodes() # pylint: disable-msg=W0212 + if self.do_locking: + if level == locking.LEVEL_NODEGROUP and self.do_grouplocks: + assert not lu.needed_locks[locking.LEVEL_NODEGROUP] + + # Lock all groups used by instances optimistically; this requires going + # via the node before it's locked, requiring verification later on + lu.needed_locks[locking.LEVEL_NODEGROUP] = \ + set(group_uuid + for instance_name in + lu.glm.list_owned(locking.LEVEL_INSTANCE) + for group_uuid in + 
lu.cfg.GetInstanceNodeGroups(instance_name))
+      elif level == locking.LEVEL_NODE:
+        lu._LockInstancesNodes() # pylint: disable-msg=W0212
+
+  @staticmethod
+  def _CheckGroupLocks(lu):
+    owned_instances = frozenset(lu.glm.list_owned(locking.LEVEL_INSTANCE))
+    owned_groups = frozenset(lu.glm.list_owned(locking.LEVEL_NODEGROUP))
+
+    # Check if node groups for locked instances are still correct
+    for instance_name in owned_instances:
+      inst_groups = lu.cfg.GetInstanceNodeGroups(instance_name)
+      if not owned_groups.issuperset(inst_groups):
+        raise errors.OpPrereqError("Instance %s's node groups changed since"
+                                   " locks were acquired, current groups are"
+                                   " '%s', owning groups '%s'; retry the"
+                                   " operation" %
+                                   (instance_name,
+                                    utils.CommaJoin(inst_groups),
+                                    utils.CommaJoin(owned_groups)),
+                                   errors.ECODE_STATE)
 
   def _GetQueryData(self, lu):
     """Computes the list of instances and their attributes.
 
     """
+    if self.do_grouplocks:
+      self._CheckGroupLocks(lu)
+
     cluster = lu.cfg.GetClusterInfo()
     all_info = lu.cfg.GetAllInstancesInfo()
 
@@ -4577,9 +4613,21 @@ class _InstanceQuery(_QueryBase):
       else:
         consinfo = None
 
+    if query.IQ_NODES in self.requested_data:
+      node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
+                                            instance_list)))
+      nodes = dict((name, lu.cfg.GetNodeInfo(name)) for name in node_names)
+      groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
+                    for uuid in set(map(operator.attrgetter("group"),
+                                        nodes.values())))
+    else:
+      nodes = None
+      groups = None
+
     return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                    disk_usage, offline_nodes, bad_nodes,
-                                   live_data, wrongnode_inst, consinfo)
+                                   live_data, wrongnode_inst, consinfo,
+                                   nodes, groups)
 
 
 class LUQuery(NoHooksLU):
diff --git a/lib/query.py b/lib/query.py
index 5d491444a1467fade3aed18b82a1e24ec63252f2..3b8133eb264fc79183dda16bb5c5d1743b293403 100644
--- a/lib/query.py
+++ b/lib/query.py
@@ -83,7 +83,8 @@ from ganeti.constants import (QFT_UNKNOWN, QFT_TEXT, QFT_BOOL, QFT_NUMBER,
 
 
 
(IQ_CONFIG, IQ_LIVE, IQ_DISKUSAGE, - IQ_CONSOLE) = range(100, 104) + IQ_CONSOLE, + IQ_NODES) = range(100, 105) (LQ_MODE, LQ_OWNER, @@ -1223,7 +1224,7 @@ class InstanceQueryData: """ def __init__(self, instances, cluster, disk_usage, offline_nodes, bad_nodes, - live_data, wrongnode_inst, console): + live_data, wrongnode_inst, console, nodes, groups): """Initializes this class. @param instances: List of instance objects @@ -1240,6 +1241,8 @@ class InstanceQueryData: @param wrongnode_inst: Set of instances running on wrong node(s) @type console: dict; instance name as key @param console: Per-instance console information + @type nodes: dict; node name as key + @param nodes: Node objects """ assert len(set(bad_nodes) & set(offline_nodes)) == len(offline_nodes), \ @@ -1255,6 +1258,8 @@ class InstanceQueryData: self.live_data = live_data self.wrongnode_inst = wrongnode_inst self.console = console + self.nodes = nodes + self.groups = groups # Used for individual rows self.inst_hvparams = None @@ -1701,6 +1706,45 @@ _INST_SIMPLE_FIELDS = { } +def _GetInstNodeGroup(ctx, default, node_name): + """Gets group UUID of an instance node. + + @type ctx: L{InstanceQueryData} + @param default: Default value + @type node_name: string + @param node_name: Node name + + """ + try: + node = ctx.nodes[node_name] + except KeyError: + return default + else: + return node.group + + +def _GetInstNodeGroupName(ctx, default, node_name): + """Gets group name of an instance node. + + @type ctx: L{InstanceQueryData} + @param default: Default value + @type node_name: string + @param node_name: Node name + + """ + try: + node = ctx.nodes[node_name] + except KeyError: + return default + + try: + group = ctx.groups[node.group] + except KeyError: + return default + + return group.name + + def _BuildInstanceFields(): """Builds list of fields for instance queries. 
@@ -1708,10 +1752,29 @@ def _BuildInstanceFields(): fields = [ (_MakeField("pnode", "Primary_node", QFT_TEXT, "Primary node"), IQ_CONFIG, QFF_HOSTNAME, _GetItemAttr("primary_node")), + (_MakeField("pnode.group", "PrimaryNodeGroup", QFT_TEXT, + "Primary node's group"), + IQ_NODES, 0, + lambda ctx, inst: _GetInstNodeGroupName(ctx, _FS_UNAVAIL, + inst.primary_node)), + (_MakeField("pnode.group.uuid", "PrimaryNodeGroupUUID", QFT_TEXT, + "Primary node's group UUID"), + IQ_NODES, 0, + lambda ctx, inst: _GetInstNodeGroup(ctx, _FS_UNAVAIL, inst.primary_node)), # TODO: Allow filtering by secondary node as hostname (_MakeField("snodes", "Secondary_Nodes", QFT_OTHER, "Secondary nodes; usually this will just be one node"), IQ_CONFIG, 0, lambda ctx, inst: list(inst.secondary_nodes)), + (_MakeField("snodes.group", "SecondaryNodesGroups", QFT_OTHER, + "Node groups of secondary nodes"), + IQ_NODES, 0, + lambda ctx, inst: map(compat.partial(_GetInstNodeGroupName, ctx, None), + inst.secondary_nodes)), + (_MakeField("snodes.group.uuid", "SecondaryNodesGroupsUUID", QFT_OTHER, + "Node group UUIDs of secondary nodes"), + IQ_NODES, 0, + lambda ctx, inst: map(compat.partial(_GetInstNodeGroup, ctx, None), + inst.secondary_nodes)), (_MakeField("admin_state", "Autostart", QFT_BOOL, "Desired state of instance (if set, the instance should be" " up)"), diff --git a/test/ganeti.query_unittest.py b/test/ganeti.query_unittest.py index 213470a2c935e4616ce574066b60d83efe7bb645..5618c7f15e01e2d32b1895218a1f16131e6319f5 100755 --- a/test/ganeti.query_unittest.py +++ b/test/ganeti.query_unittest.py @@ -581,7 +581,7 @@ class TestInstanceQuery(unittest.TestCase): ] iqd = query.InstanceQueryData(instances, cluster, None, [], [], {}, - set(), {}) + set(), {}, None, None) self.assertEqual(q.Query(iqd), [[(constants.RS_NORMAL, "inst1"), (constants.RS_NORMAL, 128), @@ -610,7 +610,7 @@ class TestInstanceQuery(unittest.TestCase): q = self._Create(selected) self.assertEqual(q.RequestedData(), 
set([query.IQ_CONFIG, query.IQ_LIVE, query.IQ_DISKUSAGE, - query.IQ_CONSOLE])) + query.IQ_CONSOLE, query.IQ_NODES])) cluster = objects.Cluster(cluster_name="testcluster", hvparams=constants.HVC_DEFAULTS, @@ -767,7 +767,7 @@ class TestInstanceQuery(unittest.TestCase): iqd = query.InstanceQueryData(instances, cluster, disk_usage, offline_nodes, bad_nodes, live_data, - wrongnode_inst, consinfo) + wrongnode_inst, consinfo, {}, {}) result = q.Query(iqd) self.assertEqual(len(result), len(instances)) self.assert_(compat.all(len(row) == len(selected)