Commit da803ff1 authored by Helga Velroyen

Extend RPC call 'node_info' by storage parameters



This patch extends the actual RPC call to accept storage
parameters with each storage unit. It adjusts all code
that performs this call by integrating the 'exclusive
storage' flag into the lvm storage unit.
Signed-off-by: Helga Velroyen <helgav@google.com>
Reviewed-by: Thomas Thrainer <thomasth@google.com>
parent 152759e4
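In short, callers no longer pass a separate exclusive-storage argument to call_node_info; the per-node flag now travels inside the parameter list of the lvm storage unit. A minimal sketch of the call-site change, using made-up node UUIDs, volume group name and hypervisor spec (none of them taken from this commit):

# Illustration only: hypothetical node UUIDs, VG name and hypervisor spec.
node_uuids = ["node1-uuid", "node2-uuid"]
hvspecs = [("xen-pvm", {})]

# Old interface: plain (type, key) storage units plus a separate per-node
# exclusive-storage mapping passed as a fourth argument:
#   lu.rpc.call_node_info(node_uuids, [("lvm-vg", "xenvg")], hvspecs, es_flags)

# New interface: one unit list per node, each unit a (type, key, params)
# triple, with the exclusive-storage flag folded into the params of the
# "lvm-vg" unit:
storage_units = {
  "node1-uuid": [("lvm-vg", "xenvg", [True])],
  "node2-uuid": [("lvm-vg", "xenvg", [False])],
}
#   lu.rpc.call_node_info(node_uuids, storage_units, hvspecs)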
@@ -2029,7 +2029,7 @@ def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
or we cannot check the node
"""
nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs, None)
nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
for node_uuid in node_uuids:
info = nodeinfo[node_uuid]
node_name = lu.cfg.GetNodeName(node_uuid)
@@ -2859,7 +2859,7 @@ class LUInstanceSetParams(LogicalUnit):
hvspecs = [(self.instance.hypervisor,
self.cluster.hvparams[self.instance.hypervisor])]
nodeinfo = self.rpc.call_node_info(mem_check_list, None,
hvspecs, False)
hvspecs)
pninfo = nodeinfo[pnode_uuid]
msg = pninfo.fail_msg
if msg:
......
@@ -660,8 +660,7 @@ class TLMigrateInstance(Tasklet):
hvspecs = [(self.instance.hypervisor,
self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])]
nodeinfo = self.rpc.call_node_info(
[self.source_node_uuid, self.target_node_uuid], None, hvspecs,
False)
[self.source_node_uuid, self.target_node_uuid], None, hvspecs)
for ninfo in nodeinfo.values():
ninfo.Raise("Unable to retrieve node information from node '%s'" %
ninfo.node)
......
@@ -892,11 +892,13 @@ def _CheckNodesFreeDiskOnVG(lu, node_uuids, vg, requested):
or we cannot check the node
"""
es_flags = rpc.GetExclusiveStorageForNodes(lu.cfg, node_uuids)
lvm_storage_units = [(constants.ST_LVM_VG, vg)]
storage_units = rpc.PrepareStorageUnitsForNodes(lu.cfg, lvm_storage_units,
node_uuids)
hvname = lu.cfg.GetHypervisorType()
hvparams = lu.cfg.GetClusterInfo().hvparams
nodeinfo = lu.rpc.call_node_info(node_uuids, [(constants.ST_LVM_VG, vg)],
[(hvname, hvparams[hvname])], es_flags)
nodeinfo = lu.rpc.call_node_info(node_uuids, storage_units,
[(hvname, hvparams[hvname])])
for node in node_uuids:
node_name = lu.cfg.GetNodeName(node)
......
@@ -483,8 +483,7 @@ def CheckNodeFreeMemory(lu, node_uuid, reason, requested, hvname, hvparams):
"""
node_name = lu.cfg.GetNodeName(node_uuid)
nodeinfo = lu.rpc.call_node_info([node_uuid], None, [(hvname, hvparams)],
False)
nodeinfo = lu.rpc.call_node_info([node_uuid], None, [(hvname, hvparams)])
nodeinfo[node_uuid].Raise("Can't get data from node %s" % node_name,
prereq=True, ecode=errors.ECODE_ENVIRON)
(_, _, (hv_info, )) = nodeinfo[node_uuid].payload
......
@@ -1183,19 +1183,20 @@ class NodeQuery(QueryBase):
# filter out non-vm_capable nodes
toquery_node_uuids = [node.uuid for node in all_info.values()
if node.vm_capable and node.uuid in node_uuids]
es_flags = rpc.GetExclusiveStorageForNodes(lu.cfg, toquery_node_uuids)
# FIXME: This currently maps everything to lvm, this should be more
# flexible
# FIXME: this per default asks for storage space information for all
# enabled disk templates. Fix this by making it possible to specify
# space report fields for specific disk templates.
raw_storage_units = utils.storage.GetStorageUnitsOfCluster(
lu.cfg, include_spindles=True)
storage_units = rpc.PrepareStorageUnitsForNodes(
lu.cfg, raw_storage_units, toquery_node_uuids)
lvm_enabled = utils.storage.IsLvmEnabled(
lu.cfg.GetClusterInfo().enabled_disk_templates)
storage_units = utils.storage.GetStorageUnitsOfCluster(
lu.cfg, include_spindles=True)
default_hypervisor = lu.cfg.GetHypervisorType()
hvparams = lu.cfg.GetClusterInfo().hvparams[default_hypervisor]
hvspecs = [(default_hypervisor, hvparams)]
node_data = lu.rpc.call_node_info(toquery_node_uuids, storage_units,
hvspecs, es_flags)
hvspecs)
live_data = dict(
(uuid, rpc.MakeLegacyNodeInfo(nresult.payload,
require_vg_info=lvm_enabled))
......
@@ -413,11 +413,12 @@ class IAllocator(object):
@return: the result of the node info RPC call
"""
es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, node_list)
storage_units = utils.storage.GetStorageUnitsOfCluster(
storage_units_raw = utils.storage.GetStorageUnitsOfCluster(
self.cfg, include_spindles=True)
storage_units = rpc.PrepareStorageUnitsForNodes(self.cfg, storage_units_raw,
node_list)
hvspecs = [(hypervisor_name, cluster_info.hvparams[hypervisor_name])]
return self.rpc.call_node_info(node_list, storage_units, hvspecs, es_flags)
return self.rpc.call_node_info(node_list, storage_units, hvspecs)
def _ComputeClusterData(self):
"""Compute the generic allocator input data.
......
@@ -718,7 +718,7 @@ def AnnotateDiskParams(template, disks, disk_params):
return [annotation_fn(disk.Copy(), ld_params) for disk in disks]
def _GetESFlag(cfg, node_uuid):
def _GetExclusiveStorageFlag(cfg, node_uuid):
ni = cfg.GetNodeInfo(node_uuid)
if ni is None:
raise errors.OpPrereqError("Invalid node name %s" % node_uuid,
@@ -726,6 +726,31 @@ def _GetESFlag(cfg, node_uuid):
return cfg.GetNdParams(ni)[constants.ND_EXCLUSIVE_STORAGE]
def _AddExclusiveStorageFlagToLvmStorageUnits(storage_units, es_flag):
"""Adds the exclusive storage flag to lvm units.
This function creates a copy of the storage_units list, with the
es_flag being added to all lvm storage units.
@type storage_units: list of pairs (string, string)
@param storage_units: list of 'raw' storage units, consisting only of
(storage_type, storage_key)
@type es_flag: boolean
@param es_flag: exclusive storage flag
@rtype: list of tuples (string, string, list)
@return: list of storage units (storage_type, storage_key, params) with
the params containing the es_flag for lvm-vg storage units
"""
result = []
for (storage_type, storage_key) in storage_units:
if storage_type == constants.ST_LVM_VG:
result.append((storage_type, storage_key, [es_flag]))
else:
result.append((storage_type, storage_key, []))
return result
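For example, with exclusive storage enabled on a node, the helper above turns the raw unit list into parameterized triples (hypothetical values; "lvm-vg" and "file" stand in for constants.ST_LVM_VG and constants.ST_FILE):

raw_units = [("lvm-vg", "xenvg"), ("file", "/srv/ganeti/file-storage")]
# _AddExclusiveStorageFlagToLvmStorageUnits(raw_units, True) yields:
#   [("lvm-vg", "xenvg", [True]),                # lvm unit carries the flag
#    ("file", "/srv/ganeti/file-storage", [])]   # other units get no params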
def GetExclusiveStorageForNodes(cfg, node_uuids):
"""Return the exclusive storage flag for all the given nodes.
@@ -734,16 +759,42 @@ def GetExclusiveStorageForNodes(cfg, node_uuids):
@type node_uuids: list or tuple
@param node_uuids: node UUIDs for which to read the flag
@rtype: dict
@return: mapping from node names to exclusive storage flags
@return: mapping from node uuids to exclusive storage flags
@raise errors.OpPrereqError: if any given node name has no corresponding
node
"""
getflag = lambda n: _GetESFlag(cfg, n)
getflag = lambda n: _GetExclusiveStorageFlag(cfg, n)
flags = map(getflag, node_uuids)
return dict(zip(node_uuids, flags))
def PrepareStorageUnitsForNodes(cfg, storage_units, node_uuids):
"""Return the lvm storage unit for all the given nodes.
The main purpose of this function is to map the exclusive storage flag, which
can be different for each node, to the default LVM storage unit.
@type cfg: L{config.ConfigWriter}
@param cfg: cluster configuration
@type storage_units: list of pairs (string, string)
@param storage_units: list of 'raw' storage units, e.g. pairs of
(storage_type, storage_key)
@type node_uuids: list or tuple
@param node_uuids: node UUIDs for which to read the flag
@rtype: dict
@return: mapping from node uuids to a list of storage units which include
the exclusive storage flag for lvm storage
@raise errors.OpPrereqError: if any given node name has no corresponding
node
"""
getunit = lambda n: _AddExclusiveStorageFlagToLvmStorageUnits(
storage_units, _GetExclusiveStorageFlag(cfg, n))
flags = map(getunit, node_uuids)
return dict(zip(node_uuids, flags))
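A hedged usage sketch, assuming 'cfg' is a ganeti.config.ConfigWriter for a two-node cluster in which only the first (made-up) node has exclusive storage enabled:

from ganeti import constants, rpc

raw_units = [(constants.ST_LVM_VG, "xenvg")]
units_per_node = rpc.PrepareStorageUnitsForNodes(
    cfg, raw_units, ["node1-uuid", "node2-uuid"])
# Expected shape of the result (per the docstring above):
#   {"node1-uuid": [(constants.ST_LVM_VG, "xenvg", [True])],
#    "node2-uuid": [(constants.ST_LVM_VG, "xenvg", [False])]}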
#: Generic encoders
_ENCODERS = {
rpc_defs.ED_OBJECT_DICT: _ObjectToDict,
......
@@ -132,12 +132,12 @@ def _BlockdevGetMirrorStatusMultiPostProc(result):
def _NodeInfoPreProc(node, args):
"""Prepare the exclusive_storage argument for node_info calls."""
assert len(args) == 3
# The third argument is either a dictionary with one value for each node, or
# a fixed value to be used for all the nodes
if type(args[2]) is dict:
return [args[0], args[1], args[2][node]]
"""Prepare the storage_units argument for node_info calls."""
assert len(args) == 2
# The storage_units argument is either a dictionary with one value for each
# node, or a fixed value to be used for all the nodes
if type(args[0]) is dict:
return [args[0][node], args[1]]
else:
return args
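In effect the pre-processor picks the node-specific unit list out of that dictionary before the request goes out; a small self-contained sketch of the same dispatch (node names and units are made up):

def _node_info_preproc_sketch(node, args):
  # args == [storage_units, hv_specs]; storage_units may be a per-node dict.
  if isinstance(args[0], dict):
    return [args[0][node], args[1]]
  return args

per_node_units = {"node1": [("lvm-vg", "xenvg", [True])],
                  "node2": [("lvm-vg", "xenvg", [False])]}
print(_node_info_preproc_sketch("node1", [per_node_units, [("xen-pvm", {})]]))
# -> [[('lvm-vg', 'xenvg', [True])], [('xen-pvm', {})]]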
@@ -476,13 +476,11 @@ _NODE_CALLS = [
], None, None, "Checks if a node has the given IP address"),
("node_info", MULTI, None, constants.RPC_TMO_URGENT, [
("storage_units", None,
"List of tuples '<storage_type>,<key>' to ask for disk space"
" information"),
"List of tuples '<storage_type>,<key>,[<param>]' to ask for disk space"
" information; the parameter list varies depending on the storage_type"),
("hv_specs", None,
"List of hypervisor specification (name, hvparams) to ask for node "
"information"),
("exclusive_storage", None,
"Whether exclusive storage is enabled"),
], _NodeInfoPreProc, None, "Return node information"),
("node_verify", MULTI, None, constants.RPC_TMO_NORMAL, [
("checkdict", None, "What to verify"),
......
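To make the new 'storage_units' argument concrete, a hypothetical value matching the description above (types and keys are examples, not taken from this commit):

# One entry per storage unit: (storage_type, storage_key, params); the params
# list varies by type: for "lvm-vg" it carries the exclusive storage flag,
# while other types currently take no parameters.
storage_units = [
  ("lvm-vg", "xenvg", [False]),
  ("file", "/srv/ganeti/file-storage", []),
]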