Commit 735e1318 authored by Michael Hanselmann's avatar Michael Hanselmann
Browse files

gnt-instance recreate-disks: Allow specifying new size

With this change a new disk size can be specified when recreating disks.
Signed-off-by: Michael Hanselmann
Reviewed-by: Iustin Pop
parent cf572b13
......@@ -9,6 +9,9 @@ Version 2.6.0 beta1
- Deprecated ``admin_up`` field. Instead, ``admin_state`` is introduced,
with 3 possible values -- ``up``, ``down`` and ``offline``.
- Replaced ``--disks`` option of ``gnt-instance recreate-disks`` with a
  more flexible ``--disk`` option. Now disk size and mode can be changed
  upon recreation.
Version 2.5.0 rc5
......@@ -39,6 +39,7 @@ from ganeti import errors
from ganeti import netutils
from ganeti import ssh
from ganeti import objects
from ganeti import ht
_EXPAND_CLUSTER = "cluster"
......@@ -601,14 +602,29 @@ def RecreateDisks(opts, args):
instance_name = args[0]
disks = []
if opts.disks:
opts.disks = [int(v) for v in opts.disks.split(",")]
except (ValueError, TypeError), err:
ToStderr("Invalid disks value: %s" % str(err))
return 1
opts.disks = []
for didx, ddict in opts.disks:
didx = int(didx)
if not ht.TDict(ddict):
msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
raise errors.OpPrereqError(msg)
if constants.IDISK_SIZE in ddict:
ddict[constants.IDISK_SIZE] = \
except ValueError, err:
raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
(didx, err))
disks.append((didx, ddict))
# TODO: Verify modifyable parameters (already done in
# LUInstanceRecreateDisks, but it'd be nice to have in the client)
if opts.node:
pnode, snode = SplitNodeOption(opts.node)
......@@ -619,9 +635,9 @@ def RecreateDisks(opts, args):
nodes = []
op = opcodes.OpInstanceRecreateDisks(instance_name=instance_name,
disks=disks, nodes=nodes)
SubmitOrSend(op, opts)
return 0
......@@ -1545,7 +1561,7 @@ commands = {
"[-f] <instance>", "Deactivate an instance's disks"),
"recreate-disks": (
"<instance>", "Recreate an instance's disks"),
"grow-disk": (
......@@ -6901,9 +6901,39 @@ class LUInstanceRecreateDisks(LogicalUnit):
REQ_BGL = False
_MODIFYABLE = frozenset([
# New or changed disk parameters may have different semantics
assert constants.IDISK_PARAMS == (_MODIFYABLE | frozenset([
# TODO: Implement support changing VG while recreating
def CheckArguments(self):
# normalise the disk list
self.op.disks = sorted(frozenset(self.op.disks))
if self.op.disks and ht.TPositiveInt(self.op.disks[0]):
# Normalize and convert deprecated list of disk indices
self.op.disks = [(idx, {}) for idx in sorted(frozenset(self.op.disks))]
duplicates = utils.FindDuplicates(map(compat.fst, self.op.disks))
if duplicates:
raise errors.OpPrereqError("Some disks have been specified more than"
" once: %s" % utils.CommaJoin(duplicates),
for (idx, params) in self.op.disks:
utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
unsupported = frozenset(params.keys()) - self._MODIFYABLE
if unsupported:
raise errors.OpPrereqError("Parameters for disk %s try to change"
" unmodifyable parameter(s): %s" %
(idx, utils.CommaJoin(unsupported)),
def ExpandNames(self):
......@@ -6969,6 +6999,7 @@ class LUInstanceRecreateDisks(LogicalUnit):
if instance.disk_template == constants.DT_DISKLESS:
raise errors.OpPrereqError("Instance '%s' has no disks" %
self.op.instance_name, errors.ECODE_INVAL)
# if we replace nodes *and* the old primary is offline, we don't
# check
assert instance.primary_node in self.owned_locks(locking.LEVEL_NODE)
......@@ -6978,17 +7009,22 @@ class LUInstanceRecreateDisks(LogicalUnit):
_CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
msg="cannot recreate disks")
if not self.op.disks:
self.op.disks = range(len(instance.disks))
if self.op.disks:
self.disks = dict(self.op.disks)
for idx in self.op.disks:
if idx >= len(instance.disks):
raise errors.OpPrereqError("Invalid disk index '%s'" % idx,
if self.op.disks != range(len(instance.disks)) and self.op.nodes:
self.disks = dict((idx, {}) for idx in range(len(instance.disks)))
maxidx = max(self.disks.keys())
if maxidx >= len(instance.disks):
raise errors.OpPrereqError("Invalid disk index '%s'" % maxidx,
if (self.op.nodes and
sorted(self.disks.keys()) != range(len(instance.disks))):
raise errors.OpPrereqError("Can't recreate disks partially and"
" change the nodes at the same time",
self.instance = instance
def Exec(self, feedback_fn):
......@@ -7001,30 +7037,42 @@ class LUInstanceRecreateDisks(LogicalUnit):
to_skip = []
mods = [] # keeps track of needed logical_id changes
mods = [] # keeps track of needed changes
for idx, disk in enumerate(instance.disks):
if idx not in self.op.disks: # disk idx has not been passed in
changes = self.disks[idx]
except KeyError:
# Disk should not be recreated
# update secondaries for disks, if needed
if self.op.nodes:
if disk.dev_type == constants.LD_DRBD8:
# need to update the nodes and minors
assert len(self.op.nodes) == 2
assert len(disk.logical_id) == 6 # otherwise disk internals
# have changed
(_, _, old_port, _, _, old_secret) = disk.logical_id
new_minors = self.cfg.AllocateDRBDMinor(self.op.nodes,
new_id = (self.op.nodes[0], self.op.nodes[1], old_port,
new_minors[0], new_minors[1], old_secret)
assert len(disk.logical_id) == len(new_id)
mods.append((idx, new_id))
if self.op.nodes and disk.dev_type == constants.LD_DRBD8:
# need to update the nodes and minors
assert len(self.op.nodes) == 2
assert len(disk.logical_id) == 6 # otherwise disk internals
# have changed
(_, _, old_port, _, _, old_secret) = disk.logical_id
new_minors = self.cfg.AllocateDRBDMinor(self.op.nodes,
new_id = (self.op.nodes[0], self.op.nodes[1], old_port,
new_minors[0], new_minors[1], old_secret)
assert len(disk.logical_id) == len(new_id)
new_id = None
mods.append((idx, new_id, changes))
# now that we have passed all asserts above, we can apply the mods
# in a single run (to avoid partial changes)
for idx, new_id in mods:
instance.disks[idx].logical_id = new_id
for idx, new_id, changes in mods:
disk = instance.disks[idx]
if new_id is not None:
assert disk.dev_type == constants.LD_DRBD8
disk.logical_id = new_id
if changes:
disk.Update(size=changes.get(constants.IDISK_SIZE, None),
mode=changes.get(constants.IDISK_MODE, None))
# change primary node, if needed
if self.op.nodes:
......@@ -730,6 +730,21 @@ class Disk(ConfigObject):
raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
" disk type %s" % self.dev_type)
def Update(self, size=None, mode=None):
  """Apply changes to size and mode.

  Used when recreating disks so that a new size and/or access mode can
  be applied in place; C{None} leaves the respective attribute untouched.

  @type size: int or None
  @param size: new disk size in MiB, or C{None} to keep the current size
  @type mode: string or None
  @param mode: new access mode, or C{None} to keep the current mode

  """
  if self.dev_type == constants.LD_DRBD8:
    if self.children:
      # DRBD devices hold the actual data in their (single) child device;
      # propagate the change there as well
      self.children[0].Update(size=size, mode=mode)
  else:
    # only DRBD disks are expected to have child devices; without this
    # `else` the assertion would fire for every DRBD disk with children
    assert not self.children

  if size is not None:
    self.size = size
  if mode is not None:
    self.mode = mode
def UnsetSize(self):
"""Sets recursively the size to zero for the disk and its children.
......@@ -180,6 +180,11 @@ _TSetParamsResult = \
# TODO: Generate check from constants.IDISK_PARAMS_TYPES (however, not all users
# of this check support all parameters)
_TDiskParams = ht.TDictOf(ht.TElemOf(constants.IDISK_PARAMS),
ht.TOr(ht.TNonEmptyString, ht.TInt))
"CLUSTER_": "C_",
"GROUP_": "G_",
......@@ -1089,10 +1094,7 @@ class OpInstanceCreate(OpCode):
("beparams", ht.EmptyDict, ht.TDict, "Backend parameters for instance"),
("disks", ht.NoDefault,
# TODO: Generate check from constants.IDISK_PARAMS_TYPES
ht.TOr(ht.TNonEmptyString, ht.TInt))),
("disks", ht.NoDefault, ht.TListOf(_TDiskParams),
"Disk descriptions, for example ``[{\"%s\": 100}, {\"%s\": 5}]``;"
" each disk definition must contain a ``%s`` value and"
" can contain an optional ``%s`` value denoting the disk access mode"
......@@ -1323,11 +1325,18 @@ class OpInstanceDeactivateDisks(OpCode):
class OpInstanceRecreateDisks(OpCode):
"""Recreate an instance's disks."""
_TDiskChanges = \
ht.TItems([ht.Comment("Disk index")(ht.TPositiveInt),
OP_DSC_FIELD = "instance_name"
("disks", ht.EmptyList, ht.TListOf(ht.TPositiveInt),
"List of disk indexes"),
("disks", ht.EmptyList,
ht.TOr(ht.TListOf(ht.TPositiveInt), ht.TListOf(_TDiskChanges)),
"List of disk indexes (deprecated) or a list of tuples containing a disk"
" index and a possibly empty dictionary with disk parameter changes"),
("nodes", ht.EmptyList, ht.TListOf(ht.TNonEmptyString),
"New instance nodes, if relocation is desired"),
......@@ -1365,26 +1365,31 @@ instance.
**recreate-disks** [--submit] [--disks=``indices``] [-n node1:[node2]]
| **recreate-disks** [--submit] [-n node1:[node2]]
| [--disk=*N*[:[size=*VAL*][,mode=*ro\|rw*]]] {*instance*}
Recreates the disks of the given instance, or only a subset of the
disks (if the option ``disks`` is passed, which must be a
comma-separated list of disk indices, starting from zero).
Recreates all or a subset of disks of the given instance.
Note that this functionality should only be used for missing disks; if
any of the given disks already exists, the operation will fail. While
this is suboptimal, recreate-disks should hopefully not be needed in
normal operation and as such the impact of this is low.
If only a subset should be recreated, any number of ``--disk`` options can
be specified. Each one expects a disk index and an optional list of disk
parameters to change. Only ``size`` and ``mode`` can be changed while
recreating disks. To recreate all disks while changing parameters on
a subset only, a ``--disk`` option must be given for every disk of the
instance.
Optionally the instance's disks can be recreated on different
nodes. This can be useful if, for example, the original nodes of the
instance have gone down (and are marked offline), so we can't recreate
on the same nodes. To do this, pass the new node(s) via ``-n`` option,
with a syntax similar to the **add** command. The number of nodes
passed must equal the number of nodes that the instance currently
has. Note that changing nodes is only allowed for 'all disk'
replacement (when ``--disks`` is not passed).
has. Note that changing nodes is only allowed when all disks are
replaced, i.e. when no ``--disk`` option is passed.
The ``--submit`` option is used to send the job to the master daemon
but not wait for its completion. The job ID will be shown so that it
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment