Commit 4b92e992 authored by Helga Velroyen

RPC 'node_info': <storage_type,key> instead of vg_names

This replaces the field 'vg_names' in the 'node_info' RPC call with
'storage_units'. A storage unit is a tuple <storage_type,key> and a
generalization of a vg_name: the list of VG names is replaced by a
list of storage units. The modified RPC call will be used to report
storage space for more than just LVM volume groups. What the 'key' is
depends on the storage type; for storage type 'lvm-vg' it is the
volume group name. To keep backward compatibility, all callers that
used the old vg_names now map every volume group to a tuple
('lvm-vg', volume_group) before making the call.
Signed-off-by: Helga Velroyen <helgav@google.com>
Reviewed-by: Bernardo Dal Seno <bdalseno@google.com>
parent 1bb99a33
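The backward-compatibility conversion described in the message boils down to wrapping each volume group name in a ('lvm-vg', <vg_name>) pair before the RPC is issued. A minimal sketch (the helper name is hypothetical; the call sites touched by this commit inline the expression):

from ganeti import constants

def _VgNamesToStorageUnits(vg_names):
  # Wrap every volume group name into a (storage_type, key) pair;
  # for backward compatibility the storage type is always 'lvm-vg'.
  return [(constants.ST_LVM_VG, vg_name) for vg_name in vg_names]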
@@ -620,11 +620,13 @@ def _GetNamedNodeInfo(names, fn):
return map(fn, names)
def GetNodeInfo(vg_names, hv_names, excl_stor):
def GetNodeInfo(storage_units, hv_names, excl_stor):
"""Gives back a hash with different information about the node.
@type vg_names: list of string
@param vg_names: Names of the volume groups to ask for disk space information
@type storage_units: list of pairs (string, string)
@param storage_units: List of pairs (storage unit, identifier) to ask for disk
space information. In case of lvm-vg, the identifier is
the VG name.
@type hv_names: list of string
@param hv_names: Names of the hypervisors to ask for node information
@type excl_stor: boolean
@@ -635,10 +637,51 @@ def GetNodeInfo(vg_names, hv_names, excl_stor):
"""
bootid = utils.ReadFile(_BOOT_ID_PATH, size=128).rstrip("\n")
vg_info = _GetNamedNodeInfo(vg_names, (lambda vg: _GetVgInfo(vg, excl_stor)))
storage_info = _GetNamedNodeInfo(
storage_units,
(lambda storage_unit: _ApplyStorageInfoFunction(storage_unit[0],
storage_unit[1],
excl_stor)))
hv_info = _GetNamedNodeInfo(hv_names, _GetHvInfo)
return (bootid, vg_info, hv_info)
return (bootid, storage_info, hv_info)
# FIXME: implement storage reporting for all missing storage types.
_STORAGE_TYPE_INFO_FN = {
constants.ST_BLOCK: None,
constants.ST_DISKLESS: None,
constants.ST_EXT: None,
constants.ST_FILE: None,
constants.ST_LVM_VG: _GetVgInfo,
constants.ST_RADOS: None,
}
def _ApplyStorageInfoFunction(storage_type, storage_key, *args):
"""Looks up and applies the correct function to calculate free and total
storage for the given storage type.
@type storage_type: string
@param storage_type: the storage type for which the storage shall be reported.
@type storage_key: string
@param storage_key: identifier of a storage unit, e.g. the volume group name
of an LVM storage unit
@type args: any
@param args: various parameters that can be used for storage reporting. These
parameters and their semantics vary from storage type to storage type and
are just propagated in this function.
@return: the results of the application of the storage space function (see
_STORAGE_TYPE_INFO_FN) if storage space reporting is implemented for that
storage type
@raises NotImplementedError: for storage types that don't support space
reporting yet
"""
fn = _STORAGE_TYPE_INFO_FN[storage_type]
if fn is not None:
return fn(storage_key, *args)
else:
raise NotImplementedError
def _CheckExclusivePvs(pvi_list):
...
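A usage sketch of the new backend entry point above, assuming a node with a volume group named 'xenvg', the Xen PVM hypervisor, and exclusive storage disabled (all three values are made up):

from ganeti import backend
from ganeti import constants

# Report free/total space for one LVM volume group and node data for one
# hypervisor; the last argument is the exclusive_storage flag.
storage_units = [(constants.ST_LVM_VG, "xenvg")]
(bootid, storage_info, hv_info) = backend.GetNodeInfo(storage_units,
                                                      [constants.HT_XEN_PVM],
                                                      False)

# Storage types without an entry in _STORAGE_TYPE_INFO_FN are rejected,
# e.g. _ApplyStorageInfoFunction(constants.ST_RADOS, "rbd", False)
# raises NotImplementedError.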
@@ -1042,10 +1042,13 @@ class LUInstanceCreate(LogicalUnit):
elif self.op.disk_template == constants.DT_EXT:
# FIXME: Function that checks prereqs if needed
pass
else:
elif self.op.disk_template in utils.GetLvmDiskTemplates():
# Check lv size requirements, if not adopting
req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
else:
# FIXME: add checks for other, non-adopting, non-lvm disk templates
pass
elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
...
@@ -847,7 +847,10 @@ def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
"""
es_flags = rpc.GetExclusiveStorageForNodeNames(lu.cfg, nodenames)
nodeinfo = lu.rpc.call_node_info(nodenames, [vg], None, es_flags)
# FIXME: This maps everything to storage type 'lvm-vg' to maintain
# the current functionality. Refactor to make it more flexible.
nodeinfo = lu.rpc.call_node_info(nodenames, [(constants.ST_LVM_VG, vg)], None,
es_flags)
for node in nodenames:
info = nodeinfo[node]
info.Raise("Cannot get current information from node %s" % node,
...
@@ -1173,7 +1173,10 @@ class NodeQuery(QueryBase):
toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
es_flags = rpc.GetExclusiveStorageForNodeNames(lu.cfg, toquery_nodes)
node_data = lu.rpc.call_node_info(toquery_nodes, [lu.cfg.GetVGName()],
# FIXME: This currently maps everything to lvm, this should be more
# flexible
storage_units = [(constants.ST_LVM_VG, lu.cfg.GetVGName())]
node_data = lu.rpc.call_node_info(toquery_nodes, storage_units,
[lu.cfg.GetHypervisorType()], es_flags)
live_data = dict((name, rpc.MakeLegacyNodeInfo(nresult.payload))
for (name, nresult) in node_data.items()
...
@@ -434,7 +434,7 @@ class IAllocator(object):
vg_name = cfg.GetVGName()
if vg_name is not None:
has_lvm = True
vg_req = [vg_name]
vg_req = [(constants.ST_LVM_VG, vg_name)]
else:
has_lvm = False
vg_req = []
...
@@ -464,8 +464,9 @@ _NODE_CALLS = [
("address", None, "IP address"),
], None, None, "Checks if a node has the given IP address"),
("node_info", MULTI, None, constants.RPC_TMO_URGENT, [
("vg_names", None,
"Names of the volume groups to ask for disk space information"),
("storage_units", None,
"List of tuples '<storage_type>,<key>' to ask for disk space"
" information"),
("hv_names", None,
"Names of the hypervisors to ask for node information"),
("exclusive_storage", None,
...
@@ -720,8 +720,8 @@ class NodeRequestHandler(http.server.HttpServerHandler):
"""Query node information.
"""
(vg_names, hv_names, excl_stor) = params
return backend.GetNodeInfo(vg_names, hv_names, excl_stor)
(storage_units, hv_names, excl_stor) = params
return backend.GetNodeInfo(storage_units, hv_names, excl_stor)
@staticmethod
def perspective_etc_hosts_modify(params):
...
@@ -42,6 +42,7 @@ import Ganeti.Rpc
import Ganeti.Query.Language
import Ganeti.Query.Common
import Ganeti.Query.Types
import qualified Ganeti.Types as T
import Ganeti.Utils (niceSort)
-- | Runtime is the resulting type for NodeInfo call.
@@ -224,6 +225,8 @@ collectLiveData False _ nodes =
return $ zip nodes (repeat $ Left (RpcResultError "Live data disabled"))
collectLiveData True cfg nodes = do
let vgs = maybeToList . clusterVolumeGroupName $ configCluster cfg
-- FIXME: This currently sets every storage unit to LVM
storage_units = zip (repeat T.StorageLvmVg) vgs
hvs = [getDefaultHypervisor cfg]
step n (bn, gn, em) =
let ndp' = getNodeNdParams cfg n
@@ -232,7 +235,8 @@ collectLiveData True cfg nodes = do
(nodeName n, ndpExclusiveStorage ndp) : em)
Nothing -> (n : bn, gn, em)
(bnodes, gnodes, emap) = foldr step ([], [], []) nodes
rpcres <- executeRpcCall gnodes (RpcCallNodeInfo vgs hvs (Map.fromList emap))
rpcres <- executeRpcCall gnodes (RpcCallNodeInfo storage_units hvs
(Map.fromList emap))
-- FIXME: The order of nodes in the result could be different from the input
return $ zip bnodes (repeat $ Left (RpcResultError "Broken configuration"))
++ rpcres
@@ -337,7 +337,7 @@ instance Rpc RpcCallInstanceList RpcResultInstanceList where
-- | NodeInfo
-- Return node information.
$(buildObject "RpcCallNodeInfo" "rpcCallNodeInfo"
[ simpleField "volume_groups" [t| [String] |]
[ simpleField "storage_units" [t| [ (StorageType,String) ] |]
, simpleField "hypervisors" [t| [Hypervisor] |]
, simpleField "exclusive_storage" [t| Map.Map String Bool |]
])
@@ -369,7 +369,7 @@ instance RpcCall RpcCallNodeInfo where
rpcCallTimeout _ = rpcTimeoutToRaw Urgent
rpcCallAcceptOffline _ = False
rpcCallData n call = J.encode
( rpcCallNodeInfoVolumeGroups call
( rpcCallNodeInfoStorageUnits call
, rpcCallNodeInfoHypervisors call
, fromMaybe (error $ "Programmer error: missing parameter for node named "
++ nodeName n)
...
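Assuming StorageType serializes to its raw string (e.g. "lvm-vg"), the encoder above together with perspective_node_info implies a wire payload of roughly the following shape; the concrete values are made up, and JSON turns the pairs into two-element lists:

import json

# (storage_units, hypervisors, exclusive_storage flag of the target node),
# as built by rpcCallData and unpacked by perspective_node_info.
params = json.loads('[[["lvm-vg", "xenvg"]], ["xen-pvm"], false]')
(storage_units, hv_names, excl_stor) = params
assert storage_units == [["lvm-vg", "xenvg"]]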