Commit 491d02ca authored by Iustin Pop

Merge branch 'devel-2.7'



* devel-2.7: (23 commits)
  Add export lists for files which didn't have them
  Fix Haskell compatibility tests with disabled file storage
  Fix QA with disabled file storage
  Fix convert-constants handling of booleans
  Fix handling of disabled (shared) file storage
  Fix low verbosity levels in htools
  Fix improperly formatted docstring
  Allow iallocator to work without LVM storage
  Allow rpc.MakeLegacyNodeInfo to parse non-LVM results
  Fix LUTestAllocator with instance alloc
  Fix confd issue regarding --no-lvm-storage
  Fix networks in _PrepareNicModifications()
  Fix sphinx label namespace
  Clarify use of move-instance with self-signed certificates
  Remove early returns in network LUs
  Fix HooksDict() in case of no tags
  Add networks to _AllIDs()
  Fix locking in LUNetworkConnect()
  Fix networks in LUInstanceSetParams()
  Fix another docstring typo
  ...

Conflicts:
        src/Ganeti/Query/Node.hs (trivial; function rename in master
                                  and 'vgs' change in devel-2.7)
        also exported new entity from TestCommon.hs (used in master)
Signed-off-by: Iustin Pop <iustin@google.com>
Reviewed-by: Helga Velroyen <helgav@google.com>
parents c56dd17b dde85e1e
#!/usr/bin/python
#
# Copyright (C) 2011, 2012 Google Inc.
# Copyright (C) 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
......@@ -40,6 +40,9 @@ RELEASED_RE = re.compile(r"^\*\(Released (?P<day>[A-Z][a-z]{2}),"
UNRELEASED_RE = re.compile(r"^\*\(unreleased\)\*$")
VERSION_RE = re.compile(r"^Version \d+(\.\d+)+( (beta|rc)\d+)?$")
#: How many days release timestamps may be in the future
TIMESTAMP_FUTURE_DAYS_MAX = 3
errors = []
......@@ -115,6 +118,13 @@ def main():
# would return an inconsistent result if the weekday is incorrect.
parsed_ts = time.mktime(time.strptime(m.group("date"), "%d %b %Y"))
parsed = datetime.date.fromtimestamp(parsed_ts)
today = datetime.date.today()
if (parsed - datetime.timedelta(TIMESTAMP_FUTURE_DAYS_MAX)) > today:
Error("Line %s: %s is more than %s days in the future (today is %s)" %
(fileinput.filelineno(), parsed, TIMESTAMP_FUTURE_DAYS_MAX,
today))
weekday = parsed.strftime("%a")
# Check weekday
......
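The hunk above keeps NEWS release dates from lying more than a few days in the future, while still recomputing the weekday from the parsed date rather than trusting the one in the file. A standalone sketch of the same check (not the actual check-news script), assuming the same "%d %b %Y" date format:

import datetime
import time

TIMESTAMP_FUTURE_DAYS_MAX = 3

def check_release_date(date_str):
    # strptime silently accepts an inconsistent weekday, so the weekday
    # is recomputed from the parsed date instead of being taken on trust.
    parsed_ts = time.mktime(time.strptime(date_str, "%d %b %Y"))
    parsed = datetime.date.fromtimestamp(parsed_ts)
    today = datetime.date.today()
    if (parsed - datetime.timedelta(days=TIMESTAMP_FUTURE_DAYS_MAX)) > today:
        raise ValueError("%s is more than %s days in the future (today is %s)"
                         % (parsed, TIMESTAMP_FUTURE_DAYS_MAX, today))
    return parsed.strftime("%a")  # e.g. "Sat", to compare with the NEWS entry

print(check_release_date("15 Dec 2012"))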
#!/usr/bin/python
#
# Copyright (C) 2011, 2012 Google Inc.
# Copyright (C) 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
......@@ -87,6 +87,8 @@ def HaskellTypeVal(value):
"""
if isinstance(value, basestring):
return ("String", "\"%s\"" % StringValueRules(value))
elif isinstance(value, bool):
return ("Bool", "%s" % value)
elif isinstance(value, int):
return ("Int", "%d" % value)
elif isinstance(value, long):
......
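The new bool branch is deliberately inserted before the existing int branch: bool is a subclass of int in Python, so isinstance(True, int) is true and a boolean constant would otherwise be emitted as a Haskell Int. A minimal illustration of the ordering (not the real convert-constants code, which also handles basestring and long):

# Why the bool check must precede the int check: bool is a subclass of
# int, so True/False would otherwise fall into the Int branch.
def haskell_type_val(value):
    if isinstance(value, str):
        return ("String", "\"%s\"" % value)
    elif isinstance(value, bool):    # must come before the int check below
        return ("Bool", "%s" % value)
    elif isinstance(value, int):
        return ("Int", "%d" % value)
    return None

assert isinstance(True, int)                       # the pitfall
assert haskell_type_val(True) == ("Bool", "True")
assert haskell_type_val(42) == ("Int", "42")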
......@@ -69,9 +69,10 @@ destination-related options default to the source value (e.g. setting
``--src-ca-file``/``--dest-ca-file``
Path to file containing source cluster Certificate Authority (CA) in
PEM format. For self-signed certificates, this is the certificate
itself. For certificates signed by a third party CA, the complete
chain must be in the file (see documentation for
:manpage:`SSL_CTX_load_verify_locations(3)`).
itself (see more details below in
:ref:`instance-move-certificates`). For certificates signed by a third
party CA, the complete chain must be in the file (see documentation
for :manpage:`SSL_CTX_load_verify_locations(3)`).
``--src-username``/``--dest-username``
RAPI username, must have write access to cluster.
``--src-password-file``/``--dest-password-file``
......@@ -96,6 +97,28 @@ destination-related options default to the source value (e.g. setting
The exit value of the tool is zero if and only if all instance moves
were successful.
.. _instance-move-certificates:
Certificates
------------
If using certificates signed by a CA, then you need to pass the same CA
certificate via both ``--src-ca-file`` and ``--dest-ca-file``.
However, if you're using self-signed certificates, this has a few
(security) implications:
- the certificates of both the source and destination clusters
(``rapi.pem`` from the Ganeti configuration directory, usually
``/var/lib/ganeti/rapi.pem``) must be available to the tool
- by default, the certificates include the private key as well, so
simply copying them to a third machine means that machine can now
impersonate the RAPI endpoints of both the source and destination
clusters
It is therefore recommended to copy only the certificate from the
``rapi.pem`` files, and pass these to ``--src-ca-file`` and
``--dest-ca-file`` appropriately.
.. vim: set textwidth=72 :
.. Local Variables:
.. mode: rst
......
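The recommendation above is to hand move-instance only the certificate part of rapi.pem, never the private key. A hypothetical helper showing what "copy only the certificate" could look like in practice; it is not part of this commit and the paths in the usage comment are illustrative:

# Hypothetical helper (not shipped with Ganeti): copy only the
# CERTIFICATE block out of rapi.pem, leaving the private key behind,
# so the result can be passed to --src-ca-file/--dest-ca-file safely.
def extract_certificate(pem_path, out_path):
    begin = "-----BEGIN CERTIFICATE-----"
    end = "-----END CERTIFICATE-----"
    with open(pem_path) as src:
        pem = src.read()
    start = pem.index(begin)
    stop = pem.index(end, start) + len(end)
    with open(out_path, "w") as dst:
        dst.write(pem[start:stop] + "\n")

# Example usage (paths are illustrative):
# extract_certificate("/var/lib/ganeti/rapi.pem", "/tmp/src-rapi.crt")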
#
#
# Copyright (C) 2006, 2007, 2010, 2011, 2012 Google Inc.
# Copyright (C) 2006, 2007, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
......@@ -255,7 +255,7 @@ class BlockDev(object):
an attached instance (lvcreate)
- attaching of a python instance to an existing (real) device
The second point, the attachement to a device, is different
The second point, the attachment to a device, is different
depending on whether the device is assembled or not. At init() time,
we search for a device with the same unique_id as us. If found,
good. It also means that the device is already assembled. If not,
......
#
#
 
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
......@@ -13497,23 +13497,28 @@ class LUInstanceSetParams(LogicalUnit):
params[constants.INIC_MAC] = \
self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
 
#if there is a change in nic's ip/network configuration
# if there is a change in (ip, network) tuple
new_ip = params.get(constants.INIC_IP, old_ip)
if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
if new_ip:
# if IP is pool then require a network and generate one IP
if new_ip.lower() == constants.NIC_IP_POOL:
if not new_net_uuid:
if new_net_uuid:
try:
new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
except errors.ReservationError:
raise errors.OpPrereqError("Unable to get a free IP"
" from the address pool",
errors.ECODE_STATE)
self.LogInfo("Chose IP %s from network %s",
new_ip,
new_net_obj.name)
params[constants.INIC_IP] = new_ip
else:
raise errors.OpPrereqError("ip=pool, but no network found",
errors.ECODE_INVAL)
try:
new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
except errors.ReservationError:
raise errors.OpPrereqError("Unable to get a free IP"
" from the address pool",
errors.ECODE_STATE)
self.LogInfo("Chose IP %s from network %s", new_ip, new_net_obj.name)
params[constants.INIC_IP] = new_ip
elif new_ip != old_ip or new_net_uuid != old_net_uuid:
# Reserve the new IP in the new network, if a new network is given
elif new_net_uuid:
try:
self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
self.LogInfo("Reserving IP %s in network %s",
......@@ -13522,19 +13527,19 @@ class LUInstanceSetParams(LogicalUnit):
raise errors.OpPrereqError("IP %s not available in network %s" %
(new_ip, new_net_obj.name),
errors.ECODE_NOTUNIQUE)
# new net is None
elif not new_net_uuid and self.op.conflicts_check:
# new network is None so check if new IP is a conflicting IP
elif self.op.conflicts_check:
_CheckForConflictingIp(self, new_ip, pnode)
 
if old_ip:
# release old IP if old network is not None
if old_ip and old_net_uuid:
try:
self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
except errors.AddressPoolError:
logging.warning("Release IP %s not contained in network %s",
old_ip, old_net_obj.name)
 
# there are no changes in (net, ip) tuple
# there are no changes in (ip, network) tuple and old network is not None
elif (old_net_uuid is not None and
(req_link is not None or req_mode is not None)):
raise errors.OpPrereqError("Not allowed to change link or mode of"
......@@ -14112,18 +14117,19 @@ class LUInstanceSetParams(LogicalUnit):
if root.dev_type in constants.LDS_DRBD:
self.cfg.AddTcpUdpPort(root.logical_id[2])
 
@staticmethod
def _CreateNewNic(idx, params, private):
def _CreateNewNic(self, idx, params, private):
"""Creates data structure for a new network interface.
 
"""
mac = params[constants.INIC_MAC]
ip = params.get(constants.INIC_IP, None)
net = params.get(constants.INIC_NETWORK, None)
net_uuid = self.cfg.LookupNetwork(net)
#TODO: not private.filled?? can a nic have no nicparams??
nicparams = private.filled
nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, nicparams=nicparams)
 
return (objects.NIC(mac=mac, ip=ip, network=net, nicparams=nicparams), [
return (nobj, [
("nic.%d" % idx,
"add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
(mac, ip, private.filled[constants.NIC_MODE],
......@@ -14131,18 +14137,23 @@ class LUInstanceSetParams(LogicalUnit):
net)),
])
 
@staticmethod
def _ApplyNicMods(idx, nic, params, private):
def _ApplyNicMods(self, idx, nic, params, private):
"""Modifies a network interface.
 
"""
changes = []
 
for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NETWORK]:
for key in [constants.INIC_MAC, constants.INIC_IP]:
if key in params:
changes.append(("nic.%s/%d" % (key, idx), params[key]))
setattr(nic, key, params[key])
 
new_net = params.get(constants.INIC_NETWORK, nic.network)
new_net_uuid = self.cfg.LookupNetwork(new_net)
if new_net_uuid != nic.network:
changes.append(("nic.network/%d" % idx, new_net))
nic.network = new_net_uuid
if private.filled:
nic.nicparams = private.filled
 
......@@ -16166,7 +16177,8 @@ class LUTestAllocator(NoHooksLU):
nics=self.op.nics,
vcpus=self.op.vcpus,
spindle_use=self.op.spindle_use,
hypervisor=self.op.hypervisor)
hypervisor=self.op.hypervisor,
node_whitelist=None)
elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
req = iallocator.IAReqRelocate(name=self.op.name,
relocate_from=list(self.relocate_from))
......@@ -16702,6 +16714,11 @@ class LUNetworkConnect(LogicalUnit):
 
assert self.group_uuid in owned_groups
 
# Check if locked instances are still correct
owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
if self.op.conflicts_check:
_CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
self.netparams = {
constants.NIC_MODE: self.network_mode,
constants.NIC_LINK: self.network_link,
......@@ -16716,23 +16733,22 @@ class LUNetworkConnect(LogicalUnit):
self.LogWarning("Network '%s' is already mapped to group '%s'" %
(self.network_name, self.group.name))
self.connected = True
return
 
if self.op.conflicts_check:
# check only if not already connected
elif self.op.conflicts_check:
pool = network.AddressPool(self.cfg.GetNetwork(self.network_uuid))
 
_NetworkConflictCheck(self, lambda nic: pool.Contains(nic.ip),
"connect to")
"connect to", owned_instances)
 
def Exec(self, feedback_fn):
if self.connected:
return
self.group.networks[self.network_uuid] = self.netparams
self.cfg.Update(self.group, feedback_fn)
# Connect the network and update the group only if not already connected
if not self.connected:
self.group.networks[self.network_uuid] = self.netparams
self.cfg.Update(self.group, feedback_fn)
 
 
def _NetworkConflictCheck(lu, check_fn, action):
def _NetworkConflictCheck(lu, check_fn, action, instances):
"""Checks for network interface conflicts with a network.
 
@type lu: L{LogicalUnit}
......@@ -16744,13 +16760,9 @@ def _NetworkConflictCheck(lu, check_fn, action):
@raise errors.OpPrereqError: If conflicting IP addresses are found.
 
"""
# Check if locked instances are still correct
owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
_CheckNodeGroupInstances(lu.cfg, lu.group_uuid, owned_instances)
conflicts = []
 
for (_, instance) in lu.cfg.GetMultiInstanceInfo(owned_instances):
for (_, instance) in lu.cfg.GetMultiInstanceInfo(instances):
instconflicts = [(idx, nic.ip)
for (idx, nic) in enumerate(instance.nics)
if check_fn(nic)]
......@@ -16824,23 +16836,27 @@ class LUNetworkDisconnect(LogicalUnit):
 
assert self.group_uuid in owned_groups
 
# Check if locked instances are still correct
owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
_CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
self.group = self.cfg.GetNodeGroup(self.group_uuid)
self.connected = True
if self.network_uuid not in self.group.networks:
self.LogWarning("Network '%s' is not mapped to group '%s'",
self.network_name, self.group.name)
self.connected = False
return
 
_NetworkConflictCheck(self, lambda nic: nic.network == self.network_uuid,
"disconnect from")
# We need this check only if the network is actually connected
else:
_NetworkConflictCheck(self, lambda nic: nic.network == self.network_uuid,
"disconnect from", owned_instances)
 
def Exec(self, feedback_fn):
if not self.connected:
return
del self.group.networks[self.network_uuid]
self.cfg.Update(self.group, feedback_fn)
# Disconnect the network and update the group only if network is connected
if self.connected:
del self.group.networks[self.network_uuid]
self.cfg.Update(self.group, feedback_fn)
 
 
#: Query type implementations
......
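The LUNetworkConnect/LUNetworkDisconnect changes above drop the early returns: CheckPrereq now records whether any work is needed in self.connected, runs the conflict check only in the branch where it matters, and Exec acts only when the flag says so. A schematic sketch of that shape, using made-up class and attribute names rather than the real logical units:

# Schematic sketch (not actual Ganeti classes): the prereq step decides
# whether work is needed and stores the result, the exec step then acts
# conditionally instead of returning early.
class NetworkConnectSketch(object):
    def __init__(self, group_networks, network_uuid, netparams):
        self.group_networks = group_networks   # stands in for group.networks
        self.network_uuid = network_uuid
        self.netparams = netparams
        self.connected = False

    def check_prereq(self):
        self.connected = self.network_uuid in self.group_networks
        # conflict checks would run here, only when not yet connected

    def execute(self):
        # Connect and update only if not already connected.
        if not self.connected:
            self.group_networks[self.network_uuid] = self.netparams

nets = {}
lu = NetworkConnectSketch(nets, "net-uuid-1", {"mode": "bridged", "link": "br0"})
lu.check_prereq()
lu.execute()
assert "net-uuid-1" in nets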
......@@ -2021,6 +2021,7 @@ class ConfigWriter:
return (self._config_data.instances.values() +
self._config_data.nodes.values() +
self._config_data.nodegroups.values() +
self._config_data.networks.values() +
[self._config_data.cluster])
def _OpenConfig(self, accept_foreign):
......
......@@ -431,7 +431,14 @@ class IAllocator(object):
node_whitelist = None
es_flags = rpc.GetExclusiveStorageForNodeNames(cfg, node_list)
node_data = self.rpc.call_node_info(node_list, [cfg.GetVGName()],
vg_name = cfg.GetVGName()
if vg_name is not None:
has_lvm = True
vg_req = [vg_name]
else:
has_lvm = False
vg_req = []
node_data = self.rpc.call_node_info(node_list, vg_req,
[hypervisor_name], es_flags)
node_iinfo = \
self.rpc.call_all_instances_info(node_list,
......@@ -441,7 +448,7 @@ class IAllocator(object):
config_ndata = self._ComputeBasicNodeData(cfg, ninfo, node_whitelist)
data["nodes"] = self._ComputeDynamicNodeData(ninfo, node_data, node_iinfo,
i_list, config_ndata)
i_list, config_ndata, has_lvm)
assert len(data["nodes"]) == len(ninfo), \
"Incomplete node data computed"
......@@ -494,7 +501,7 @@ class IAllocator(object):
@staticmethod
def _ComputeDynamicNodeData(node_cfg, node_data, node_iinfo, i_list,
node_results):
node_results, has_lvm):
"""Compute global node data.
@param node_results: the basic node structures as filled from the config
......@@ -511,17 +518,22 @@ class IAllocator(object):
nresult.Raise("Can't get data for node %s" % nname)
node_iinfo[nname].Raise("Can't get node instance info from node %s" %
nname)
remote_info = rpc.MakeLegacyNodeInfo(nresult.payload)
remote_info = rpc.MakeLegacyNodeInfo(nresult.payload,
require_vg_info=has_lvm)
for attr in ["memory_total", "memory_free", "memory_dom0",
"vg_size", "vg_free", "cpu_total"]:
def get_attr(attr):
if attr not in remote_info:
raise errors.OpExecError("Node '%s' didn't return attribute"
" '%s'" % (nname, attr))
if not isinstance(remote_info[attr], int):
value = remote_info[attr]
if not isinstance(value, int):
raise errors.OpExecError("Node '%s' returned invalid value"
" for '%s': %s" %
(nname, attr, remote_info[attr]))
(nname, attr, value))
return value
mem_free = get_attr("memory_free")
# compute memory used by primary instances
i_p_mem = i_p_up_mem = 0
for iinfo, beinfo in i_list:
......@@ -532,19 +544,27 @@ class IAllocator(object):
else:
i_used_mem = int(node_iinfo[nname].payload[iinfo.name]["memory"])
i_mem_diff = beinfo[constants.BE_MAXMEM] - i_used_mem
remote_info["memory_free"] -= max(0, i_mem_diff)
mem_free -= max(0, i_mem_diff)
if iinfo.admin_state == constants.ADMINST_UP:
i_p_up_mem += beinfo[constants.BE_MAXMEM]
# TODO: replace this with proper storage reporting
if has_lvm:
total_disk = get_attr("vg_size")
free_disk = get_attr("vg_free")
else:
# we didn't even ask the node for VG status, so use zeros
total_disk = free_disk = 0
# compute memory used by instances
pnr_dyn = {
"total_memory": remote_info["memory_total"],
"reserved_memory": remote_info["memory_dom0"],
"free_memory": remote_info["memory_free"],
"total_disk": remote_info["vg_size"],
"free_disk": remote_info["vg_free"],
"total_cpus": remote_info["cpu_total"],
"total_memory": get_attr("memory_total"),
"reserved_memory": get_attr("memory_dom0"),
"free_memory": mem_free,
"total_disk": total_disk,
"free_disk": free_disk,
"total_cpus": get_attr("cpu_total"),
"i_pri_memory": i_p_mem,
"i_pri_up_memory": i_p_up_mem,
}
......
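The get_attr closure above validates each legacy node attribute once, and the has_lvm flag makes the disk figures fall back to zero when the node was never asked about volume groups. A standalone sketch of that validation and fallback; the function and error types are illustrative, not Ganeti's:

# Standalone sketch: pull required integer attributes from a node's
# legacy info dict, and use zero disk figures when LVM is disabled.
def build_dynamic_node_data(remote_info, has_lvm):
    def get_attr(attr):
        if attr not in remote_info:
            raise RuntimeError("node didn't return attribute '%s'" % attr)
        value = remote_info[attr]
        if not isinstance(value, int):
            raise RuntimeError("invalid value for '%s': %s" % (attr, value))
        return value

    if has_lvm:
        total_disk, free_disk = get_attr("vg_size"), get_attr("vg_free")
    else:
        total_disk = free_disk = 0   # the node was not asked for VG status

    return {
        "total_memory": get_attr("memory_total"),
        "free_memory": get_attr("memory_free"),
        "total_disk": total_disk,
        "free_disk": free_disk,
        "total_cpus": get_attr("cpu_total"),
    }

print(build_dynamic_node_data({"memory_total": 4096, "memory_free": 2048,
                               "cpu_total": 4}, has_lvm=False))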
......@@ -2027,7 +2027,7 @@ class Network(TaggableObject):
result = {
"%sNETWORK_NAME" % prefix: self.name,
"%sNETWORK_UUID" % prefix: self.uuid,
"%sNETWORK_TAGS" % prefix: " ".join(self.tags),
"%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
}
if self.network:
result["%sNETWORK_SUBNET" % prefix] = self.network
......
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
......@@ -29,9 +29,15 @@ from ganeti import vcluster
# Build-time constants
DEFAULT_FILE_STORAGE_DIR = vcluster.AddNodePrefix(_autoconf.FILE_STORAGE_DIR)
DEFAULT_SHARED_FILE_STORAGE_DIR = \
vcluster.AddNodePrefix(_autoconf.SHARED_FILE_STORAGE_DIR)
if _autoconf.ENABLE_FILE_STORAGE:
DEFAULT_FILE_STORAGE_DIR = vcluster.AddNodePrefix(_autoconf.FILE_STORAGE_DIR)
else:
DEFAULT_FILE_STORAGE_DIR = _autoconf.FILE_STORAGE_DIR
if _autoconf.ENABLE_SHARED_FILE_STORAGE:
DEFAULT_SHARED_FILE_STORAGE_DIR = \
vcluster.AddNodePrefix(_autoconf.SHARED_FILE_STORAGE_DIR)
else:
DEFAULT_SHARED_FILE_STORAGE_DIR = _autoconf.SHARED_FILE_STORAGE_DIR
EXPORT_DIR = vcluster.AddNodePrefix(_autoconf.EXPORT_DIR)
OS_SEARCH_PATH = _autoconf.OS_SEARCH_PATH
ES_SEARCH_PATH = _autoconf.ES_SEARCH_PATH
......
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
......@@ -571,18 +571,25 @@ def _EncodeBlockdevRename(value):
return [(d.ToDict(), uid) for d, uid in value]
def MakeLegacyNodeInfo(data):
def MakeLegacyNodeInfo(data, require_vg_info=True):
"""Formats the data returned by L{rpc.RpcRunner.call_node_info}.
Converts the data into a single dictionary. This is fine for most use cases,
but some require information from more than one volume group or hypervisor.
@param require_vg_info: raise an error if the returned vg_info
doesn't have any values
"""
(bootid, (vg_info, ), (hv_info, )) = data
(bootid, vgs_info, (hv_info, )) = data
ret = utils.JoinDisjointDicts(hv_info, {"bootid": bootid})
if require_vg_info or vgs_info:
(vg0_info, ) = vgs_info
ret = utils.JoinDisjointDicts(vg0_info, ret)
return utils.JoinDisjointDicts(utils.JoinDisjointDicts(vg_info, hv_info), {
"bootid": bootid,
})
return ret
def _AnnotateDParamsDRBD(disk, (drbd_params, data_params, meta_params)):
......
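A simplified, self-contained rendering of the new MakeLegacyNodeInfo behaviour: the VG portion of the result is merged in only when it is present or explicitly required. It assumes disjoint dictionary keys, which is what utils.JoinDisjointDicts enforces in the real code:

# Simplified stand-in for MakeLegacyNodeInfo (not the real rpc.py code).
def make_legacy_node_info(data, require_vg_info=True):
    bootid, vgs_info, (hv_info,) = data
    ret = dict(hv_info, bootid=bootid)
    if require_vg_info or vgs_info:
        (vg0_info,) = vgs_info   # raises if the node returned no VG data
        ret.update(vg0_info)
    return ret

# With LVM enabled the single VG entry is merged in; without LVM the
# caller passes require_vg_info=False and an empty VG list is accepted.
with_lvm = ("boot-1", [{"vg_size": 100, "vg_free": 40}], [{"memory_total": 4096}])
no_lvm = ("boot-1", [], [{"memory_total": 4096}])
print(make_legacy_node_info(with_lvm))
print(make_legacy_node_info(no_lvm, require_vg_info=False))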
#
#
# Copyright (C) 2007, 2011, 2012 Google Inc.
# Copyright (C) 2007, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
......@@ -115,8 +115,18 @@ def TestNodeStorage():
master = qa_config.GetMasterNode()
for storage_type in constants.VALID_STORAGE_TYPES:
cmd = ["gnt-node", "list-storage", "--storage-type", storage_type]
# If file storage is not enabled, the full QA would fail; just check
# that the command fails and skip the rest of the tests for this
# storage type
if storage_type == constants.ST_FILE and not constants.ENABLE_FILE_STORAGE:
AssertCommand(cmd, fail=True)
continue
# Test simple list
AssertCommand(["gnt-node", "list-storage", "--storage-type", storage_type])
AssertCommand(cmd)
# Test all storage fields
cmd = ["gnt-node", "list-storage", "--storage-type", storage_type,
......
......@@ -319,7 +319,7 @@ checkGroup verbose gname nl il = do
putStrLn $ "Selected node group: " ++ gname
let (bad_nodes, bad_instances) = Cluster.computeBadItems nl il
unless (verbose == 0) $ printf
unless (verbose < 1) $ printf
"Initial check done: %d bad nodes, %d bad instances.\n"
(length bad_nodes) (length bad_instances)
......@@ -399,7 +399,7 @@ main opts args = do
putStr sol_msg
unless (verbose == 0) $
unless (verbose < 1) $
printf "Solution length=%d\n" (length ord_plc)
let cmd_jobs = Cluster.splitJobs cmd_strs
......
......@@ -4,7 +4,7 @@
{-
Copyright (C) 2012 Google Inc.
Copyright (C) 2012, 2013 Google Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
......@@ -191,7 +191,7 @@ printStats _ True level phase values = do
printStats verbose False level phase values = do
let prefix = phaseLevelDescr phase level
descr = descrData level
unless (verbose == 0) $ do
unless (verbose < 1) $ do
putStrLn ""
putStr prefix
mapM_ (uncurry (printf " %s: %s\n")) (zip descr values)
......@@ -311,7 +311,7 @@ main opts args = do
clusterstats = map sum . transpose . map snd $ groupsstats
needrebalance = clusterNeedsRebalance clusterstats
unless (verbose == 0 || machineread) .
unless (verbose < 1 || machineread) .
putStrLn $ if nosimulation
then "Running in no-simulation mode."
else if needrebalance
......
......@@ -578,7 +578,8 @@ $(buildObject "Cluster" "cluster" $
, simpleField "highest_used_port" [t| Int |]
, simpleField "tcpudp_port_pool" [t| [Int] |]
, simpleField "mac_prefix" [t| String |]
, simpleField "volume_group_name" [t| String |]
, optionalField $
simpleField "volume_group_name" [t| String |]
, simpleField "reserved_lvs" [t| [String] |]
, optionalField $
simpleField "drbd_usermode_helper" [t| String |]
......
......@@ -31,6 +31,7 @@ module Ganeti.Query.Node
import Control.Applicative
import Data.List
import Data.Maybe
import qualified Data.Map as Map
import qualified Text.JSON as J
......@@ -109,7 +110,7 @@ nodeLiveFieldBuilder (fname, ftitle, ftype, _, fdoc) =
, FieldRuntime $ nodeLiveRpcCall fname
, QffNormal)
-- | The docstring for the node role. Note that we use 'reverse in
-- | The docstring for the node role. Note that we use 'reverse' in
-- order to keep the same order as Python.
nodeRoleDoc :: String
nodeRoleDoc =
......@@ -221,7 +222,7 @@ collectLiveData:: Bool -> ConfigData -> [Node] -> IO [(Node, Runtime)]
collectLiveData False _ nodes =
return $ zip nodes (repeat $ Left (RpcResultError "Live data disabled"))
collectLiveData True cfg nodes = do
let vgs = [clusterVolumeGroupName $ configCluster cfg]
let vgs = maybeToList . clusterVolumeGroupName $ configCluster cfg
hvs = [getDefaultHypervisor cfg]
step n (bn, gn, em) =
let ndp' = getNodeNdParams cfg n
......
......@@ -116,7 +116,8 @@ handleCall cdata QueryClusterInfo =
, ("master_netmask", showJSON $ clusterMasterNetmask cluster)
, ("use_external_mip_script",
showJSON $ clusterUseExternalMipScript cluster)