Commit 250a9404 authored by Bernardo Dal Seno

"exclusive_storage" cannot be changed on single nodes



There has never been support for a configuration where nodes in the same node
group have different values of the exclusive_storage flag. This patch
removes the ability to change the flag for individual nodes.
Signed-off-by: Bernardo Dal Seno <bdalseno@google.com>
Reviewed-by: Guido Trotter <ultrotter@google.com>
parent 45f62156
@@ -1020,18 +1020,32 @@ def _CheckOutputFields(static, dynamic, selected):
                                % ",".join(delta), errors.ECODE_INVAL)
 
 
-def _CheckGlobalHvParams(params):
-  """Validates that given hypervisor params are not global ones.
+def _CheckParamsNotGlobal(params, glob_pars, kind, bad_levels, good_levels):
+  """Make sure that none of the given parameters is global.
 
-  This will ensure that instances don't get customised versions of
-  global params.
+  If a global parameter is found, an L{errors.OpPrereqError} exception is
+  raised. This is used to avoid setting global parameters for individual nodes.
+
+  @type params: dictionary
+  @param params: Parameters to check
+  @type glob_pars: frozenset
+  @param glob_pars: Forbidden parameters
+  @type kind: string
+  @param kind: Kind of parameters (e.g. "node")
+  @type bad_levels: string
+  @param bad_levels: Level(s) at which the parameters are forbidden (e.g.
+    "instance")
+  @type good_levels: string
+  @param good_levels: Level(s) at which the parameters are allowed (e.g.
+    "cluster or group")
 
   """
-  used_globals = constants.HVC_GLOBALS.intersection(params)
+  used_globals = glob_pars.intersection(params)
   if used_globals:
-    msg = ("The following hypervisor parameters are global and cannot"
-           " be customized at instance level, please modify them at"
-           " cluster level: %s" % utils.CommaJoin(used_globals))
+    msg = ("The following %s parameters are global and cannot"
+           " be customized at %s level, please modify them at"
+           " %s level: %s" %
+           (kind, bad_levels, good_levels, utils.CommaJoin(used_globals)))
     raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
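For context, here is a minimal standalone sketch of how the generalized check behaves; `OpPrereqError` and `CommaJoin` below are simplified stand-ins for the ganeti helpers used in the hunk above:

```python
# Simplified stand-ins for ganeti's errors.OpPrereqError and utils.CommaJoin.
class OpPrereqError(Exception):
  pass

def CommaJoin(names):
  return ", ".join(str(val) for val in names)

def CheckParamsNotGlobal(params, glob_pars, kind, bad_levels, good_levels):
  """Raises OpPrereqError if any key of params is listed in glob_pars."""
  used_globals = glob_pars.intersection(params)
  if used_globals:
    raise OpPrereqError("The following %s parameters are global and cannot"
                        " be customized at %s level, please modify them at"
                        " %s level: %s" %
                        (kind, bad_levels, good_levels,
                         CommaJoin(used_globals)))

NDC_GLOBALS = frozenset(["exclusive_storage"])

# Rejected: "exclusive_storage" is global, so per-node customization fails.
try:
  CheckParamsNotGlobal({"exclusive_storage": True}, NDC_GLOBALS,
                       "node", "node", "cluster or group")
except OpPrereqError as err:
  print(err)

# Accepted: "spindle_count" is not in the global set, so nothing is raised.
CheckParamsNotGlobal({"spindle_count": 3}, NDC_GLOBALS,
                     "node", "node", "cluster or group")
```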
 
 
@@ -6129,6 +6143,8 @@ class LUNodeAdd(LogicalUnit):
 
     if self.op.ndparams:
       utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
+      _CheckParamsNotGlobal(self.op.ndparams, constants.NDC_GLOBALS, "node",
+                            "node", "cluster or group")
 
     if self.op.hv_state:
       self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, None)
@@ -6154,9 +6170,6 @@ class LUNodeAdd(LogicalUnit):
     if vg_name is not None:
       vparams = {constants.NV_PVLIST: [vg_name]}
       excl_stor = _IsExclusiveStorageEnabledNode(cfg, self.new_node)
-      if self.op.ndparams:
-        excl_stor = self.op.ndparams.get(constants.ND_EXCLUSIVE_STORAGE,
-                                         excl_stor)
       cname = self.cfg.GetClusterName()
       result = rpcrunner.call_node_verify_light([node], vparams, cname)[node]
       (errmsgs, _) = _CheckNodePVs(result.payload, excl_stor)
@@ -6554,6 +6567,8 @@ class LUNodeSetParams(LogicalUnit):
     if self.op.ndparams:
       new_ndparams = _GetUpdatedParams(self.node.ndparams, self.op.ndparams)
       utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
+      _CheckParamsNotGlobal(self.op.ndparams, constants.NDC_GLOBALS, "node",
+                            "node", "cluster or group")
       self.new_ndparams = new_ndparams
 
     if self.op.hv_state:
@@ -10592,7 +10607,8 @@ class LUInstanceCreate(LogicalUnit):
       hv_type.CheckParameterSyntax(filled_hvp)
       self.hv_full = filled_hvp
       # check that we don't specify global parameters on an instance
-      _CheckGlobalHvParams(self.op.hvparams)
+      _CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
+                            "instance", "cluster")
 
     # fill and remember the beparams dict
     self.be_full = _ComputeFullBeParams(self.op, cluster)
@@ -13290,7 +13306,8 @@ class LUInstanceSetParams(LogicalUnit):
       raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
 
     if self.op.hvparams:
-      _CheckGlobalHvParams(self.op.hvparams)
+      _CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
+                            "hypervisor", "instance", "cluster")
 
     self.op.disks = self._UpgradeDiskNicMods(
       "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
......
@@ -2023,6 +2023,10 @@ NDC_DEFAULTS = {
   ND_EXCLUSIVE_STORAGE: False,
   }
 
+NDC_GLOBALS = compat.UniqueFrozenset([
+  ND_EXCLUSIVE_STORAGE,
+  ])
+
 DISK_LD_DEFAULTS = {
   LD_DRBD8: {
     LDP_RESYNC_RATE: CLASSIC_DRBD_SYNC_SPEED,
......
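`NDC_GLOBALS` is built with `compat.UniqueFrozenset`. A sketch of what that helper is assumed to do: a `frozenset` constructor that refuses duplicate entries, so mistakes in these constant lists fail loudly at import time:

```python
def UniqueFrozenset(seq):
  """Builds a frozenset from seq, raising ValueError on duplicate values.

  A sketch of the assumed behavior of ganeti's compat.UniqueFrozenset.
  """
  items = list(seq)
  result = frozenset(items)
  if len(items) != len(result):
    raise ValueError("Duplicate values found")
  return result

NDC_GLOBALS = UniqueFrozenset(["exclusive_storage"])
```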
@@ -38,6 +38,7 @@ pass to and from external parties.
 import ConfigParser
 import re
 import copy
+import logging
 import time
 from cStringIO import StringIO
@@ -1334,6 +1335,12 @@ class Node(TaggableObject):
     if self.ndparams is None:
       self.ndparams = {}
+    # And remove any global parameter
+    for key in constants.NDC_GLOBALS:
+      if key in self.ndparams:
+        logging.warning("Ignoring %s node parameter for node %s",
+                        key, self.name)
+        del self.ndparams[key]
 
     if self.powered is None:
       self.powered = True
......
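The `UpgradeConfig` change above means that a per-node value of a global parameter left over from an older configuration is dropped, with a warning, when the configuration is loaded. A standalone sketch of that filtering (`NDC_GLOBALS` inlined for illustration):

```python
import logging
logging.basicConfig()

NDC_GLOBALS = frozenset(["exclusive_storage"])

def FilterGlobalNdparams(name, ndparams):
  """Drops global parameters from a node's ndparams, warning about each one."""
  for key in NDC_GLOBALS:
    if key in ndparams:
      logging.warning("Ignoring %s node parameter for node %s", key, name)
      del ndparams[key]
  return ndparams

# The leftover exclusive_storage value is discarded; spindle_count survives.
print(FilterGlobalNdparams("node1.example.com",
                           {"exclusive_storage": True, "spindle_count": 3}))
```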
@@ -120,7 +120,9 @@ exclusive_storage
   When this Boolean flag is enabled, physical disks on the node are
   assigned to instance disks in an exclusive manner, so as to lower I/O
   interference between instances. See the `Partitioned Ganeti
-  <design-partitioned.rst>`_ design document for more details.
+  <design-partitioned.rst>`_ design document for more details. This
+  parameter cannot be set on individual nodes, as its value must be
+  the same within each node group.
 
 Hypervisor State Parameters
......
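With the per-node path closed, the flag is managed cluster- or group-wide. A hedged sketch of the corresponding commands, in the command-list style of the QA helpers below: the `gnt-cluster` form mirrors the QA suite's `TestSetExclStorCluster`, while the `gnt-group` form and the group name `group1` are assumptions for illustration:

```python
def BuildClusterOrGroupESCmd(scope, value, group_name=None):
  """Builds a command that sets exclusive_storage at an allowed level."""
  if scope == "cluster":
    cmd = ["gnt-cluster", "modify"]
  else:
    cmd = ["gnt-group", "modify"]
  cmd.extend(["--node-parameters", "exclusive_storage=%s" % value])
  if group_name:
    cmd.append(group_name)
  return cmd

print(BuildClusterOrGroupESCmd("cluster", True))
print(BuildClusterOrGroupESCmd("group", True, "group1"))
```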
@@ -452,7 +452,7 @@ def RunExclusiveStorageTests():
   node = qa_config.AcquireNode()
   try:
     old_es = qa_cluster.TestSetExclStorCluster(False)
-    qa_cluster.TestExclStorSingleNode(node)
+    qa_node.TestExclStorSingleNode(node)
 
     qa_cluster.TestSetExclStorCluster(True)
     qa_cluster.TestExclStorSharedPv(node)
......
@@ -667,24 +667,6 @@ def TestSetExclStorCluster(newvalue):
   return oldvalue
 
 
-def _BuildSetESCmd(value, node_name):
-  return ["gnt-node", "modify", "--node-parameters",
-          "exclusive_storage=%s" % value, node_name]
-
-
-def TestExclStorSingleNode(node):
-  """cluster-verify reports exclusive_storage set only on one node.
-
-  """
-  node_name = node["primary"]
-  es_val = _GetBoolClusterField("exclusive_storage")
-  assert not es_val
-  AssertCommand(_BuildSetESCmd(True, node_name))
-  AssertClusterVerify(fail=True, errors=[constants.CV_EGROUPMIXEDESFLAG])
-  AssertCommand(_BuildSetESCmd("default", node_name))
-  AssertClusterVerify()
-
-
 def TestExclStorSharedPv(node):
   """cluster-verify reports LVs that share the same PV with exclusive_storage.
......
@@ -422,3 +422,22 @@ def TestNodeListFields():
 def TestNodeListDrbd(node):
   """gnt-node list-drbd"""
   AssertCommand(["gnt-node", "list-drbd", node["primary"]])
+
+
+def _BuildSetESCmd(action, value, node_name):
+  cmd = ["gnt-node"]
+  if action == "add":
+    cmd.extend(["add", "--readd"])
+  else:
+    cmd.append("modify")
+  cmd.extend(["--node-parameters", "exclusive_storage=%s" % value, node_name])
+  return cmd
+
+
+def TestExclStorSingleNode(node):
+  """gnt-node add/modify cannot change the exclusive_storage flag.
+
+  """
+  for action in ["add", "modify"]:
+    for value in (True, False, "default"):
+      AssertCommand(_BuildSetESCmd(action, value, node["primary"]), fail=True)
@@ -351,6 +351,23 @@ class TestNode(unittest.TestCase):
     self.assertEqual(node2.disk_state[constants.LD_LV]["lv2082"].total, 512)
     self.assertEqual(node2.disk_state[constants.LD_LV]["lv32352"].total, 128)
 
+  def testFilterEsNdp(self):
+    node1 = objects.Node(name="node11673.example.com", ndparams={
+      constants.ND_EXCLUSIVE_STORAGE: True,
+      })
+    node2 = objects.Node(name="node11674.example.com", ndparams={
+      constants.ND_SPINDLE_COUNT: 3,
+      constants.ND_EXCLUSIVE_STORAGE: False,
+      })
+    self.assertTrue(constants.ND_EXCLUSIVE_STORAGE in node1.ndparams)
+    node1.UpgradeConfig()
+    self.assertFalse(constants.ND_EXCLUSIVE_STORAGE in node1.ndparams)
+    self.assertTrue(constants.ND_EXCLUSIVE_STORAGE in node2.ndparams)
+    self.assertTrue(constants.ND_SPINDLE_COUNT in node2.ndparams)
+    node2.UpgradeConfig()
+    self.assertFalse(constants.ND_EXCLUSIVE_STORAGE in node2.ndparams)
+    self.assertTrue(constants.ND_SPINDLE_COUNT in node2.ndparams)
+
 
 if __name__ == "__main__":
   testutils.GanetiTestProgram()