Commit d2429198 authored by Thomas Thrainer

Merge branch 'stable-2.9' into master



* stable-2.9
  Allow instance mods if only osparams are given
  Use node UUIDs for locking instead of node names
  Allow instance mods if only osparams are given
  Fix a node name vs. UUID bug in instance import
  Typo in hroller man page
  Use node UUID for locking in LUInstanceMove
  Fix harep manpage title
  Fix IPolicy violation check in LUGroupSetParams
  Use FQDN to check master node status
  Revert "Display node name instead of UUID in error message"
  Display node name instead of UUID in error message
  Use node name in error message
  QA: make ipolicy test respect enabled disk templates
  QA: adjust tests wrt ipolicy disk templates
  ClusterSetParams: move vg-name checks to CheckPrereq
  man gnt-cluster: mention ipolicy check
  Update NEWS file regarding ipolicy checks
  gnt-group add/modify: ipolicy vs disk templates
  Move Ipolicy utility function to cmdlib/common.py
  bootstrap: restrict ipolicy to enabled disk templates
  gnt-cluster modify: ipolicy vs enabled disk templates
  gnt-cluster modify: factor out ipolicy check

Conflicts:
    lib/cmdlib/group.py (due to moved function, changes already in
                         master)
Signed-off-by: Thomas Thrainer <thomasth@google.com>
Reviewed-by: Michele Tartara <mtartara@google.com>
parents cf9f3b92 dd84e715
@@ -33,6 +33,11 @@ Incompatible/important changes
default file and shared file storage directories. It now checks that
the directories are explicitly allowed in the 'file-storage-paths' file and
that the directories exist on all nodes.
- The list of allowed disk templates in the instance policy and the list
of cluster-wide enabled disk templates is now checked for consistency
on cluster or group modification. On cluster initialization, the ipolicy
disk templates are ensured to be a subset of the cluster-wide enabled
disk templates.
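  As a sketch of the two behaviours (illustrative Python, not the actual
  Ganeti helpers): modification rejects allowed-but-not-enabled templates,
  while initialization silently narrows the ipolicy to the enabled set.

  def check_subset(ipolicy_dts, enabled_dts):
    # Cluster/group modification: fail loudly on inconsistency.
    not_enabled = set(ipolicy_dts) - set(enabled_dts)
    if not_enabled:
      raise ValueError("allowed but not enabled: %s"
                       % ", ".join(sorted(not_enabled)))

  def restrict_to_enabled(ipolicy_dts, enabled_dts):
    # Cluster initialization: silently drop disabled templates.
    return [dt for dt in ipolicy_dts if dt in set(enabled_dts)]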
New features
~~~~~~~~~~~~
@@ -452,6 +452,26 @@ def _InitCheckEnabledDiskTemplates(enabled_disk_templates):
errors.ECODE_INVAL)
def _RestrictIpolicyToEnabledDiskTemplates(ipolicy, enabled_disk_templates):
"""Restricts the ipolicy's disk templates to the enabled ones.
This function removes from the ipolicy's list of allowed disk templates
all entries that are not enabled by the cluster.
@type ipolicy: dict
@param ipolicy: the instance policy
@type enabled_disk_templates: list of string
@param enabled_disk_templates: the list of cluster-wide enabled disk
templates
"""
assert constants.IPOLICY_DTS in ipolicy
allowed_disk_templates = ipolicy[constants.IPOLICY_DTS]
restricted_disk_templates = list(set(allowed_disk_templates)
.intersection(set(enabled_disk_templates)))
ipolicy[constants.IPOLICY_DTS] = restricted_disk_templates
def InitCluster(cluster_name, mac_prefix, # pylint: disable=R0913, R0914
master_netmask, master_netdev, file_storage_dir,
shared_file_storage_dir, candidate_pool_size, secondary_ip=None,
@@ -595,6 +615,7 @@ def InitCluster(cluster_name, mac_prefix, # pylint: disable=R0913, R0914
objects.NIC.CheckParameterSyntax(nicparams)
full_ipolicy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy)
_RestrictIpolicyToEnabledDiskTemplates(full_ipolicy, enabled_disk_templates)
if ndparams is not None:
utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
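A usage sketch of the restriction applied above (hypothetical values; the
real dictionary key is constants.IPOLICY_DTS):

ipolicy = {"disk-templates": ["plain", "drbd", "diskless"]}
_RestrictIpolicyToEnabledDiskTemplates(ipolicy, ["plain", "diskless"])
# ipolicy["disk-templates"] now contains only "plain" and "diskless";
# "drbd" was silently dropped rather than rejected (order may change,
# since the implementation goes through a set intersection).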
@@ -53,6 +53,7 @@ def AddGroup(opts, args):
minmax_ispecs=opts.ipolicy_bounds_specs,
ipolicy_vcpu_ratio=opts.ipolicy_vcpu_ratio,
ipolicy_spindle_ratio=opts.ipolicy_spindle_ratio,
ipolicy_disk_templates=opts.ipolicy_disk_templates,
group_ipolicy=True)
(group_name,) = args
@@ -159,7 +160,7 @@ def SetGroupParams(opts, args):
allmods = [opts.ndparams, opts.alloc_policy, opts.diskparams, opts.hv_state,
opts.disk_state, opts.ipolicy_bounds_specs,
opts.ipolicy_vcpu_ratio, opts.ipolicy_spindle_ratio,
opts.diskparams]
opts.diskparams, opts.ipolicy_disk_templates]
if allmods.count(None) == len(allmods):
ToStderr("Please give at least one of the parameters.")
return 1
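The allmods list is the command's "did the user ask for anything" check:
every new group-level option, here --ipolicy-disk-templates, has to be
appended to it, or giving only that option would be rejected. In isolation:

def needs_at_least_one(mods):
  # True when no modification was requested (every option is None).
  return mods.count(None) == len(mods)

assert needs_at_least_one([None, None])
assert not needs_at_least_one([None, ["plain"]])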
@@ -56,7 +56,8 @@ from ganeti.cmdlib.common import ShareAll, RunPostHook, \
GetWantedInstances, MergeAndVerifyHvState, MergeAndVerifyDiskState, \
GetUpdatedIPolicy, ComputeNewInstanceViolations, GetUpdatedParams, \
CheckOSParams, CheckHVParams, AdjustCandidatePool, CheckNodePVs, \
ComputeIPolicyInstanceViolation, AnnotateDiskParams, SupportsOob
ComputeIPolicyInstanceViolation, AnnotateDiskParams, SupportsOob, \
CheckIpolicyVsDiskTemplates
import ganeti.masterd.instance
@@ -740,16 +741,28 @@ class LUClusterSetParams(LogicalUnit):
unset whether there are instances still using it.
"""
lvm_is_enabled = utils.IsLvmEnabled(enabled_disk_templates)
lvm_gets_enabled = utils.LvmGetsEnabled(enabled_disk_templates,
new_enabled_disk_templates)
current_vg_name = self.cfg.GetVGName()
if self.op.vg_name == '':
if lvm_is_enabled:
raise errors.OpPrereqError("Cannot unset volume group if lvm-based"
" disk templates are or get enabled.")
if self.op.vg_name is None:
if current_vg_name is None and lvm_is_enabled:
raise errors.OpPrereqError("Please specify a volume group when"
" enabling lvm-based disk-templates.")
if self.op.vg_name is not None and not self.op.vg_name:
if self.cfg.HasAnyDiskOfType(constants.LD_LV):
raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
" instances exist", errors.ECODE_INVAL)
if (self.op.vg_name is not None and
utils.IsLvmEnabled(enabled_disk_templates)) or \
(self.cfg.GetVGName() is not None and
utils.LvmGetsEnabled(enabled_disk_templates,
new_enabled_disk_templates)):
if (self.op.vg_name is not None and lvm_is_enabled) or \
(self.cfg.GetVGName() is not None and lvm_gets_enabled):
self._CheckVgNameOnNodes(node_uuids)
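The prereq logic above boils down to a few rules; a hedged, self-contained
restatement (simplified helper, not the LU code):

def check_vg_change(op_vg_name, current_vg_name, lvm_is_enabled,
                    lvm_gets_enabled, lvm_disks_exist):
  """Simplified restatement of the vg-name prereq rules (sketch only)."""
  if op_vg_name == '' and lvm_is_enabled:
    raise ValueError("cannot unset the volume group while lvm-based"
                     " disk templates are enabled")
  if op_vg_name is None and current_vg_name is None and lvm_is_enabled:
    raise ValueError("a volume group is required when enabling"
                     " lvm-based disk templates")
  if op_vg_name == '' and lvm_disks_exist:
    raise ValueError("cannot disable lvm storage while lvm-based"
                     " disks exist")
  # Return whether the volume group must be verified on the nodes.
  return (op_vg_name is not None and lvm_is_enabled) or \
         (current_vg_name is not None and lvm_gets_enabled)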
def _CheckVgNameOnNodes(self, node_uuids):
@@ -773,22 +786,71 @@ class LUClusterSetParams(LogicalUnit):
(self.cfg.GetNodeName(node_uuid), vgstatus),
errors.ECODE_ENVIRON)
def _GetEnabledDiskTemplates(self, cluster):
@staticmethod
def _GetEnabledDiskTemplatesInner(op_enabled_disk_templates,
old_enabled_disk_templates):
"""Determines the enabled disk templates and the subset of disk templates
that are newly enabled by this operation.
"""
enabled_disk_templates = None
new_enabled_disk_templates = []
if self.op.enabled_disk_templates:
enabled_disk_templates = self.op.enabled_disk_templates
if op_enabled_disk_templates:
enabled_disk_templates = op_enabled_disk_templates
new_enabled_disk_templates = \
list(set(enabled_disk_templates)
- set(cluster.enabled_disk_templates))
- set(old_enabled_disk_templates))
else:
enabled_disk_templates = cluster.enabled_disk_templates
enabled_disk_templates = old_enabled_disk_templates
return (enabled_disk_templates, new_enabled_disk_templates)
def _GetEnabledDiskTemplates(self, cluster):
"""Determines the enabled disk templates and the subset of disk templates
that are newly enabled by this operation.
"""
return self._GetEnabledDiskTemplatesInner(self.op.enabled_disk_templates,
cluster.enabled_disk_templates)
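The point of extracting _GetEnabledDiskTemplatesInner into a static method
is testability without a LU instance; a hedged test sketch using plain
asserts rather than Ganeti's test harness:

inner = LUClusterSetParams._GetEnabledDiskTemplatesInner

# No templates given in the opcode: the old list is kept, nothing is new.
assert inner(None, ["plain"]) == (["plain"], [])

# Enabling "drbd" on top of "plain": it shows up as newly enabled.
enabled, new = inner(["plain", "drbd"], ["plain"])
assert enabled == ["plain", "drbd"] and new == ["drbd"]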
def _CheckIpolicy(self, cluster, enabled_disk_templates):
"""Checks the ipolicy.
@type cluster: C{objects.Cluster}
@param cluster: the cluster's configuration
@type enabled_disk_templates: list of string
@param enabled_disk_templates: list of (possibly newly) enabled disk
templates
"""
# FIXME: write unit tests for this
if self.op.ipolicy:
self.new_ipolicy = GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
group_policy=False)
CheckIpolicyVsDiskTemplates(self.new_ipolicy,
enabled_disk_templates)
all_instances = self.cfg.GetAllInstancesInfo().values()
violations = set()
for group in self.cfg.GetAllNodeGroupsInfo().values():
instances = frozenset([inst for inst in all_instances
if compat.any(nuuid in group.members
for nuuid in inst.all_nodes)])
new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
ipol = masterd.instance.CalculateGroupIPolicy(cluster, group)
new = ComputeNewInstanceViolations(ipol, new_ipolicy, instances,
self.cfg)
if new:
violations.update(new)
if violations:
self.LogWarning("After the ipolicy change the following instances"
" violate them: %s",
utils.CommaJoin(utils.NiceSort(violations)))
else:
CheckIpolicyVsDiskTemplates(cluster.ipolicy,
enabled_disk_templates)
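ComputeNewInstanceViolations is used here so the warning lists only
instances that the new policy newly puts in violation; conceptually (a
hedged sketch, not the real signature):

def new_violations(violates_old, violates_new, instances):
  # Instances that break the new ipolicy but were fine under the old one.
  return [inst for inst in instances
          if violates_new(inst) and not violates_old(inst)]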
def CheckPrereq(self):
"""Check prerequisites.
@@ -873,27 +935,7 @@ class LUClusterSetParams(LogicalUnit):
for name, values in svalues.items()))
for storage, svalues in new_disk_state.items())
if self.op.ipolicy:
self.new_ipolicy = GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
group_policy=False)
all_instances = self.cfg.GetAllInstancesInfo().values()
violations = set()
for group in self.cfg.GetAllNodeGroupsInfo().values():
instances = frozenset([inst for inst in all_instances
if compat.any(nuuid in group.members
for nuuid in inst.all_nodes)])
new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
ipol = masterd.instance.CalculateGroupIPolicy(cluster, group)
new = ComputeNewInstanceViolations(ipol, new_ipolicy, instances,
self.cfg)
if new:
violations.update(new)
if violations:
self.LogWarning("After the ipolicy change the following instances"
" violate them: %s",
utils.CommaJoin(utils.NiceSort(violations)))
self._CheckIpolicy(cluster, enabled_disk_templates)
if self.op.nicparams:
utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
@@ -1049,26 +1091,14 @@ class LUClusterSetParams(LogicalUnit):
"""
if self.op.vg_name is not None:
if self.op.vg_name and not \
utils.IsLvmEnabled(self.cluster.enabled_disk_templates):
feedback_fn("Note that you specified a volume group, but did not"
" enable any lvm disk template.")
new_volume = self.op.vg_name
if not new_volume:
if utils.IsLvmEnabled(self.cluster.enabled_disk_templates):
raise errors.OpPrereqError("Cannot unset volume group if lvm-based"
" disk templates are enabled.")
new_volume = None
if new_volume != self.cfg.GetVGName():
self.cfg.SetVGName(new_volume)
else:
feedback_fn("Cluster LVM configuration already in desired"
" state, not changing")
else:
if utils.IsLvmEnabled(self.cluster.enabled_disk_templates) and \
not self.cfg.GetVGName():
raise errors.OpPrereqError("Please specify a volume group when"
" enabling lvm-based disk-templates.")
def _SetFileStorageDir(self, feedback_fn):
"""Set the file storage directory.
@@ -1116,3 +1116,24 @@ def CheckStorageTypeEnabled(cluster, storage_type):
" enabled in this cluster. Enabled disk"
" templates are: %s" % (storage_type,
",".join(cluster.enabled_disk_templates)))
def CheckIpolicyVsDiskTemplates(ipolicy, enabled_disk_templates):
"""Checks ipolicy disk templates against enabled disk tempaltes.
@type ipolicy: dict
@param ipolicy: the new ipolicy
@type enabled_disk_templates: list of string
@param enabled_disk_templates: list of enabled disk templates on the
cluster
@raises errors.OpPrereqError: if there is at least one allowed disk
template that is not also enabled.
"""
assert constants.IPOLICY_DTS in ipolicy
allowed_disk_templates = ipolicy[constants.IPOLICY_DTS]
not_enabled = set(allowed_disk_templates) - set(enabled_disk_templates)
if not_enabled:
raise errors.OpPrereqError("The following disk template are allowed"
" by the ipolicy, but not enabled on the"
" cluster: %s" % utils.CommaJoin(not_enabled))
@@ -38,7 +38,8 @@ from ganeti.cmdlib.common import MergeAndVerifyHvState, \
MergeAndVerifyDiskState, GetWantedNodes, GetUpdatedParams, \
CheckNodeGroupInstances, GetUpdatedIPolicy, \
ComputeNewInstanceViolations, GetDefaultIAllocator, ShareAll, \
CheckInstancesNodeGroups, LoadNodeEvacResult, MapInstanceLvsToNodes
CheckInstancesNodeGroups, LoadNodeEvacResult, MapInstanceLvsToNodes, \
CheckIpolicyVsDiskTemplates
import ganeti.masterd.instance
@@ -59,6 +60,21 @@ class LUGroupAdd(LogicalUnit):
self.needed_locks = {}
self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
def _CheckIpolicy(self):
"""Checks the group's ipolicy for consistency and validity.
"""
if self.op.ipolicy:
cluster = self.cfg.GetClusterInfo()
full_ipolicy = cluster.SimpleFillIPolicy(self.op.ipolicy)
try:
objects.InstancePolicy.CheckParameterSyntax(full_ipolicy, False)
except errors.ConfigurationError, err:
raise errors.OpPrereqError("Invalid instance policy: %s" % err,
errors.ECODE_INVAL)
CheckIpolicyVsDiskTemplates(full_ipolicy,
cluster.enabled_disk_templates)
def CheckPrereq(self):
"""Check prerequisites.
@@ -103,14 +119,7 @@ class LUGroupAdd(LogicalUnit):
else:
self.new_diskparams = {}
if self.op.ipolicy:
cluster = self.cfg.GetClusterInfo()
full_ipolicy = cluster.SimpleFillIPolicy(self.op.ipolicy)
try:
objects.InstancePolicy.CheckParameterSyntax(full_ipolicy, False)
except errors.ConfigurationError, err:
raise errors.OpPrereqError("Invalid instance policy: %s" % err,
errors.ECODE_INVAL)
self._CheckIpolicy()
def BuildHooksEnv(self):
"""Build hooks env.
@@ -427,6 +436,37 @@ class LUGroupSetParams(LogicalUnit):
utils.ForceDictType(new_params, constants.DISK_DT_TYPES)
return new_params
def _CheckIpolicy(self, cluster, owned_instance_names):
"""Sanity checks for the ipolicy.
@type cluster: C{objects.Cluster}
@param cluster: the cluster's configuration
@type owned_instance_names: list of string
@param owned_instance_names: list of instances
"""
if self.op.ipolicy:
self.new_ipolicy = GetUpdatedIPolicy(self.group.ipolicy,
self.op.ipolicy,
group_policy=True)
new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
CheckIpolicyVsDiskTemplates(new_ipolicy,
cluster.enabled_disk_templates)
instances = \
dict(self.cfg.GetMultiInstanceInfoByName(owned_instance_names))
gmi = ganeti.masterd.instance
violations = \
ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
self.group),
new_ipolicy, instances.values(),
self.cfg)
if violations:
self.LogWarning("After the ipolicy change the following instances"
" violate them: %s",
utils.CommaJoin(violations))
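Group ipolicies are partial, so SimpleFillIPolicy overlays them on the
cluster-wide defaults before the disk-template check runs; roughly (a
hedged sketch, real ipolicies also nest min/max specs):

def simple_fill_ipolicy(cluster_ipolicy, group_ipolicy):
  filled = dict(cluster_ipolicy)   # start from cluster-wide defaults
  filled.update(group_ipolicy)     # group-level values take precedence
  return filled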
def CheckPrereq(self):
"""Check prerequisites.
@@ -475,25 +515,7 @@ class LUGroupSetParams(LogicalUnit):
MergeAndVerifyDiskState(self.op.disk_state,
self.group.disk_state_static)
if self.op.ipolicy:
self.new_ipolicy = GetUpdatedIPolicy(self.group.ipolicy,
self.op.ipolicy,
group_policy=True)
new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
instances = \
dict(self.cfg.GetMultiInstanceInfoByName(owned_instance_names))
gmi = ganeti.masterd.instance
violations = \
ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
self.group),
new_ipolicy, instances.values(),
self.cfg)
if violations:
self.LogWarning("After the ipolicy change the following instances"
" violate them: %s",
utils.CommaJoin(violations))
self._CheckIpolicy(cluster, owned_instance_names)
def BuildHooksEnv(self):
"""Build hooks env.
@@ -602,7 +602,7 @@ class LUInstanceCreate(LogicalUnit):
if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
if not os.path.isabs(src_path):
self.op.src_path = src_path = \
self.op.src_path = \
utils.PathJoin(pathutils.EXPORT_DIR, src_path)
self.needed_locks[locking.LEVEL_NODE_RES] = \
@@ -710,13 +710,13 @@ class LUInstanceCreate(LogicalUnit):
locked_nodes = self.owned_locks(locking.LEVEL_NODE)
exp_list = self.rpc.call_export_list(locked_nodes)
found = False
for node in exp_list:
if exp_list[node].fail_msg:
for node_uuid in exp_list:
if exp_list[node_uuid].fail_msg:
continue
if self.op.src_path in exp_list[node].payload:
if self.op.src_path in exp_list[node_uuid].payload:
found = True
self.op.src_node = node
self.op.src_node_uuid = self.cfg.GetNodeInfoByName(node).uuid
self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
self.op.src_node_uuid = node_uuid
self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
self.op.src_path)
break
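The hunk above fixes a name-vs-UUID mix-up: call_export_list returns a
dict keyed by node UUID, but the old loop passed those keys to
GetNodeInfoByName as if they were names. A minimal illustration
(hypothetical data):

exp_list = {"b8e3-uuid": ["myexport"]}          # RPC result, keyed by UUID
names_by_uuid = {"b8e3-uuid": "node1.example.com"}
for node_uuid in exp_list:
  # correct: resolve the UUID to a display name when one is needed
  src_node_name = names_by_uuid[node_uuid]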
@@ -770,8 +770,10 @@ class LUInstanceCreate(LogicalUnit):
if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
ndict = {}
for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
ndict[name] = v
nic_param_name = "nic%d_%s" % (idx, name)
if einfo.has_option(constants.INISECT_INS, nic_param_name):
v = einfo.get(constants.INISECT_INS, nic_param_name)
ndict[name] = v
nics.append(ndict)
else:
break
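The new has_option guard matches standard ConfigParser semantics: get()
raises NoOptionError for a missing option, so optional NIC fields of an
export have to be probed first. A standalone sketch (Python 2 flavour,
matching the codebase):

import ConfigParser  # "configparser" on Python 3

einfo = ConfigParser.SafeConfigParser()
einfo.add_section("instance")
einfo.set("instance", "nic0_mac", "aa:bb:cc:dd:ee:ff")

ndict = {}
for name in ["mac", "ip"]:
  opt = "nic0_%s" % name
  if einfo.has_option("instance", opt):  # "ip" is absent and gets skipped
    ndict[name] = einfo.get("instance", opt)

assert ndict == {"mac": "aa:bb:cc:dd:ee:ff"}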
@@ -1679,7 +1681,7 @@ class LUInstanceMove(LogicalUnit):
(self.op.target_node_uuid, self.op.target_node) = \
ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
self.op.target_node)
self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node]
self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
self.needed_locks[locking.LEVEL_NODE_RES] = []
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
@@ -1934,11 +1936,11 @@ class LUInstanceMultiAlloc(NoHooksLU):
for inst in self.op.instances:
(inst.pnode_uuid, inst.pnode) = \
ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
nodeslist.append(inst.pnode)
nodeslist.append(inst.pnode_uuid)
if inst.snode is not None:
(inst.snode_uuid, inst.snode) = \
ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
nodeslist.append(inst.snode)
nodeslist.append(inst.snode_uuid)
self.needed_locks[locking.LEVEL_NODE] = nodeslist
# Lock resources of instance's primary and secondary nodes (copy to
@@ -2387,8 +2389,8 @@ class LUInstanceSetParams(LogicalUnit):
def CheckArguments(self):
if not (self.op.nics or self.op.disks or self.op.disk_template or
self.op.hvparams or self.op.beparams or self.op.os_name or
self.op.offline is not None or self.op.runtime_mem or
self.op.pnode):
self.op.osparams or self.op.offline is not None or
self.op.runtime_mem or self.op.pnode):
raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
if self.op.hvparams:
@@ -204,8 +204,9 @@ def CheckNodeNotDrained(lu, node_uuid):
@raise errors.OpPrereqError: if the node is drained
"""
if lu.cfg.GetNodeInfo(node_uuid).drained:
raise errors.OpPrereqError("Can't use drained node %s" % node_uuid,
node = lu.cfg.GetNodeInfo(node_uuid)
if node.drained:
raise errors.OpPrereqError("Can't use drained node %s" % node.name,
errors.ECODE_STATE)
@@ -539,11 +539,12 @@ Please note, that ``std`` values are not the same as defaults set by
- ``--specs-mem-size`` limits the amount of memory available
- ``--specs-nic-count`` sets limits on the number of NICs used
The ``--ipolicy-disk-templates`` and ``--ipolicy-spindle-ratio`` options
take a decimal number. The ``--ipolicy-disk-templates`` option takes a
comma-separated list of disk templates.
The ``--ipolicy-spindle-ratio`` option takes a decimal number. The
``--ipolicy-disk-templates`` option takes a comma-separated list of disk
templates. This list of disk templates must be a subset of the list
of cluster-wide enabled disk templates (which can be set with
``--enabled-disk-templates``).
- ``--ipolicy-disk-templates`` limits the allowed disk templates
- ``--ipolicy-spindle-ratio`` limits the instances-spindles ratio
- ``--ipolicy-vcpu-ratio`` limits the vcpu-cpu ratio
HAREP(1) Ganeti | Version @GANETI_VERSION@
=========================================
==========================================
NAME
----
@@ -45,7 +45,7 @@ hroller is a cluster maintenance reboot scheduler. It can calculate
which set of nodes can be rebooted at the same time while avoiding
having both primary and secondary nodes being rebooted at the same time.
For backends that support identifying the master node (currenlty
For backends that support identifying the master node (currently
RAPI and LUXI), the master node is scheduled as the last node
in the last reboot group. Apart from this restriction, larger reboot
groups are put first.
@@ -578,8 +578,11 @@ def _RestoreEnabledDiskTemplates():
other tests.
"""
cmd = ["gnt-cluster", "modify", "--enabled-disk-templates=%s" %
",".join(qa_config.GetEnabledDiskTemplates())]
enabled_disk_templates = qa_config.GetEnabledDiskTemplates()
cmd = ["gnt-cluster", "modify",
"--enabled-disk-templates=%s" % ",".join(enabled_disk_templates),
"--ipolicy-disk-templates=%s" % ",".join(enabled_disk_templates),
]
if utils.IsLvmEnabled(qa_config.GetEnabledDiskTemplates()):
vgname = qa_config.get("vg-name", constants.DEFAULT_VG)
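Because the ipolicy list must now stay a subset of the enabled templates,
the QA commands below pass both flags with the same value; a hypothetical
helper capturing the repeated pattern:

def _ClusterModifyTemplatesCmd(disk_templates):
  # Hypothetical QA convenience: keep --enabled-disk-templates and
  # --ipolicy-disk-templates in sync so the consistency check passes.
  tlist = ",".join(disk_templates)
  return ["gnt-cluster", "modify",
          "--enabled-disk-templates=%s" % tlist,
          "--ipolicy-disk-templates=%s" % tlist]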
@@ -606,7 +609,8 @@ def _TestClusterModifyDiskTemplatesArguments(default_disk_template,
AssertCommand(
["gnt-cluster", "modify",
"--enabled-disk-templates=%s,%s" %
(default_disk_template, default_disk_template)],
(default_disk_template, default_disk_template),
"--ipolicy-disk-templates=%s" % default_disk_template],
fail=False)
if constants.DT_DRBD8 in enabled_disk_templates:
@@ -618,12 +622,15 @@
# has to be installed on the nodes in this case
AssertCommand(["gnt-cluster", "modify",
"--drbd-usermode-helper=%s" % drbd_usermode_helper,
"--enabled-disk-templates=%s" % constants.DT_DISKLESS],
"--enabled-disk-templates=%s" % constants.DT_DISKLESS,
"--ipolicy-disk-templates=%s" % constants.DT_DISKLESS],
fail=False)
# specifying a helper when drbd is re-enabled
AssertCommand(["gnt-cluster", "modify",
"--drbd-usermode-helper=%s" % drbd_usermode_helper,
"--enabled-disk-templates=%s" %
",".join(enabled_disk_templates),
"--ipolicy-disk-templates=%s" %
",".join(enabled_disk_templates)],
fail=False)
@@ -650,13 +657,15 @@ def _TestClusterModifyDiskTemplatesVgName(enabled_disk_templates):
AssertCommand(
["gnt-cluster", "modify",
"--enabled-disk-templates=%s" % non_lvm_template,
"--ipolicy-disk-templates=%s" % non_lvm_template,
"--vg-name="],
fail=False)
# Try to enable lvm, when no volume group is given
AssertCommand(
["gnt-cluster", "modify",
"--enabled-disk-templates=%s" % lvm_template],
"--enabled-disk-templates=%s" % lvm_template,
"--ipolicy-disk-templates=%s" % lvm_template],
fail=True)
# Set volume group, with lvm still disabled: just a warning
@@ -666,13 +675,15 @@ def _TestClusterModifyDiskTemplatesVgName(enabled_disk_templates):
AssertCommand(
["gnt-cluster", "modify",
"--enabled-disk-templates=%s" % lvm_template,
"--ipolicy-disk-templates=%s" % lvm_template,
"--vg-name="],
fail=True)
# Enable lvm with vg name present
AssertCommand(
["gnt-cluster", "modify",
"--enabled-disk-templates=%s" % lvm_template],
"--enabled-disk-templates=%s" % lvm_template,
"--ipolicy-disk-templates=%s" % lvm_template],
fail=False)
# Try unsetting vg name with lvm still enabled
@@ -680,7 +691,10 @@ def _TestClusterModifyDiskTemplatesVgName(enabled_disk_templates):
# Disable lvm with vg name still set
AssertCommand(
["gnt-cluster", "modify", "--enabled-disk-templates=%s" % non_lvm_template],
["gnt-cluster", "modify",
"--enabled-disk-templates=%s" % non_lvm_template,
"--ipolicy-disk-templates=%s" % non_lvm_template,
],
fail=False)
# Try unsetting vg name with lvm disabled
@@ -690,6 +704,7 @@ def _TestClusterModifyDiskTemplatesVgName(enabled_disk_templates):
AssertCommand(
["gnt-cluster", "modify",
"--enabled-disk-templates=%s" % lvm_template,
"--ipolicy-disk-templates=%s" % lvm_template,
"--vg-name=%s" % vgname],
fail=False)
@@ -697,6 +712,7 @@ def _TestClusterModifyDiskTemplatesVgName(enabled_disk_templates):
AssertCommand(
["gnt-cluster", "modify",
"--enabled-disk-templates=%s" % non_lvm_template,
"--ipolicy-disk-templates=%s" % non_lvm_template,
"--vg-name="],
fail=False)
@@ -719,8 +735,8 @@ def _TestClusterModifyUsedDiskTemplate(instance_template,
- set([instance_template]))
AssertCommand(
["gnt-cluster", "modify",
"--enabled-disk-templates=%s" %
",".join(new_disk_templates)],
"--enabled-disk-templates=%s" % ",".join(new_disk_templates),
"--ipolicy-disk-templates=%s" % ",".join(new_disk_templates)],
fail=True)
@@ -733,14 +749,14 @@ def _TestClusterModifyUnusedDiskTemplate(instance_template):
AssertCommand(
["gnt-cluster", "modify",
"--enabled-disk-templates=%s" %
",".