Commit a365b47f authored by Bernardo Dal Seno

Merge branch 'stable-2.7' into stable-2.8

* stable-2.7:
  Update NEWS with disk creation fixes
  Don't fail to deactivate master IP if already down
  Add QA for recreating single instance disks
  Add QA for gnt-instance modify --disk
  Clean up when "gnt-instance modify" fails to create a disk
  recreate-disks honors the prealloc_wipe_disks flag
  Introduce wrapper for cmdlib._WipeDisks()
  Don't catch an exception that cannot be raised
  Wipe disks added through "gnt-instance modify"
  Support /var/run being a symlink in upload
  Final NEWS and configure.ac update for 2.7.0~rc1
  gnt-job list: deal with non-ascii encoding in jobs

Conflicts:
	NEWS
	lib/cmdlib.py
	qa/ganeti-qa.py
	qa/qa-sample.json

NEWS, qa/ganeti-qa.py and qa/qa-sample.json had trivial conflicts, but I've
updated the QA changes to use the new interfaces. lib/cmdlib.py was renamed
and split, so I had to semi-manually apply the changes to the new files. I
also had to change the names of some functions by removing or adding the
initial underscore, and to update the imported names.
Signed-off-by: Bernardo Dal Seno <bdalseno@google.com>
Reviewed-by: Thomas Thrainer <thomasth@google.com>
parents aad242d2 ef726a3f
@@ -24,11 +24,28 @@ Version 2.8.0 beta1
configuration back to the previous stable version.
Version 2.7.0 rc1
Version 2.7.0 rc2
-----------------
*(unreleased)*
- ``devel/upload`` now works when ``/var/run`` on the target nodes is a
symlink.
- Disks added through ``gnt-instance modify`` or created through
``gnt-instance recreate-disks`` are wiped, if the
``prealloc_wipe_disks`` flag is set.
- If wiping newly created disks fails, the disks are removed. Also,
partial failures in creating disks through ``gnt-instance modify``
trigger a cleanup of the partially-created disks.
- Removing the master IP address doesn't fail if the address has
already been removed.
Version 2.7.0 rc1
-----------------
*(Released Fri, 3 May 2013)*
Incompatible/important changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -149,6 +166,8 @@ Since beta3:
- Disable inter-cluster instance move for file-based instances, because
it is dependent on instance export, which is not supported for
file-based instances. (Issue 414)
- Fix gnt-job crashes on non-ascii characters (Issue 427)
- Fix volume group checks on non-vm-capable nodes (Issue 432)
Version 2.7.0 beta3
......
@@ -2,7 +2,7 @@
m4_define([gnt_version_major], [2])
m4_define([gnt_version_minor], [7])
m4_define([gnt_version_revision], [0])
m4_define([gnt_version_suffix], [~beta3])
m4_define([gnt_version_suffix], [~rc1])
m4_define([gnt_version_full],
m4_format([%d.%d.%d%s],
gnt_version_major, gnt_version_minor,
......
@@ -130,7 +130,7 @@ echo ---
# and now put it under $prefix on the target node(s)
for host; do
echo Uploading code to ${host}...
rsync -v -rlDc \
rsync -v -rlKDc \
-e "ssh -oBatchMode=yes" \
--exclude="*.py[oc]" --exclude="*.pdf" --exclude="*.html" \
"$TXD/" \
......
@@ -60,9 +60,16 @@ def _FormatStatus(value):
raise errors.ProgrammerError("Unknown job status code '%s'" % value)
def _FormatSummary(value):
"""Formats a job's summary. Takes possible non-ascii encoding into account.
"""
return ','.encode('utf-8').join(item.encode('utf-8') for item in value)
_JOB_LIST_FORMAT = {
"status": (_FormatStatus, False),
"summary": (lambda value: ",".join(str(item) for item in value), False),
"summary": (_FormatSummary, False),
}
_JOB_LIST_FORMAT.update(dict.fromkeys(["opstart", "opexec", "opend"],
(lambda value: map(FormatTimestamp,
......
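The change above replaces the old str()-based join in the "summary" formatter with one that encodes every item to UTF-8 first. A minimal, standalone sketch of why that matters under Python 2 (the sample summaries are invented for illustration; they are not from the patch):

# Old formatter: str() on a unicode object implicitly encodes with the ascii
# codec, so a non-ascii character in a job summary crashed "gnt-job list"
# (Issue 427).
summaries = [u"INSTANCE_RENAME(caf\u00e9)", u"CLUSTER_VERIFY"]
try:
  ",".join(str(item) for item in summaries)
except UnicodeEncodeError:
  pass  # this is the crash the patch fixes
# New formatter: encode the separator and every item to UTF-8 byte strings
# before joining, so non-ascii characters survive.
print ",".encode("utf-8").join(item.encode("utf-8") for item in summaries)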
@@ -51,11 +51,10 @@ from ganeti.cmdlib.common import INSTANCE_DOWN, \
AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
from ganeti.cmdlib.instance_storage import CreateDisks, \
CheckNodesFreeDiskPerVG, WipeDisks, WaitForSync, \
CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
CreateBlockDev, StartInstanceDisks, ShutdownInstanceDisks, \
AssembleInstanceDisks
StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
@@ -3102,24 +3101,13 @@ class LUInstanceSetParams(LogicalUnit):
[params], file_path, file_driver, idx,
self.Log, self.diskparams)[0]
info = GetInstanceInfoText(instance)
logging.info("Creating volume %s for instance %s",
disk.iv_name, instance.name)
# Note: this needs to be kept in sync with _CreateDisks
#HARDCODE
for node in instance.all_nodes:
f_create = (node == instance.primary_node)
try:
CreateBlockDev(self, node, instance, disk, f_create, info, f_create)
except errors.OpExecError, err:
self.LogWarning("Failed to create volume %s (%s) on node '%s': %s",
disk.iv_name, disk, node, err)
new_disks = CreateDisks(self, instance, disks=[disk])
if self.cluster.prealloc_wipe_disks:
# Wipe new disk
WipeDisks(self, instance,
disks=[(idx, disk, 0)])
WipeOrCleanupDisks(self, instance,
disks=[(idx, disk, 0)],
cleanup=new_disks)
return (disk, [
("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
......
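Condensed, the new add-disk path in LUInstanceSetParams above is: delegate creation to CreateDisks(), then wipe the new disk and let the wrapper undo the creation if the wipe fails. A hedged sketch of that call pattern (the helper name _AddAndWipeNewDisk and its bare arguments are illustrative; CreateDisks(), WipeOrCleanupDisks() and their keyword arguments are the ones from the patch):

from ganeti.cmdlib.instance_storage import CreateDisks, WipeOrCleanupDisks

def _AddAndWipeNewDisk(lu, instance, idx, disk):
  # 'lu' stands in for the LogicalUnit ('self' in the real code).
  # CreateDisks() returns the (node, disk) pairs it actually created.
  new_disks = CreateDisks(lu, instance, disks=[disk])
  if lu.cfg.GetClusterInfo().prealloc_wipe_disks:
    # Wipe the freshly created disk; if wiping fails, the pairs passed as
    # 'cleanup' are removed again and the error is re-raised.
    WipeOrCleanupDisks(lu, instance, disks=[(idx, disk, 0)],
                       cleanup=new_disks)
  return disk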
@@ -179,7 +179,7 @@ def IsExclusiveStorageEnabledNodeName(cfg, nodename):
return IsExclusiveStorageEnabledNode(cfg, ni)
def CreateBlockDev(lu, node, instance, device, force_create, info,
def _CreateBlockDev(lu, node, instance, device, force_create, info,
force_open):
"""Wrapper around L{_CreateBlockDevInner}.
@@ -192,7 +192,26 @@ def CreateBlockDev(lu, node, instance, device, force_create, info,
force_open, excl_stor)
def CreateDisks(lu, instance, to_skip=None, target_node=None):
def _UndoCreateDisks(lu, disks_created):
"""Undo the work performed by L{CreateDisks}.
This function is called in case of an error to undo the work of
L{CreateDisks}.
@type lu: L{LogicalUnit}
@param lu: the logical unit on whose behalf we execute
@param disks_created: the result returned by L{CreateDisks}
"""
for (node, disk) in disks_created:
lu.cfg.SetDiskID(disk, node)
result = lu.rpc.call_blockdev_remove(node, disk)
if result.fail_msg:
logging.warning("Failed to remove newly-created disk %s on node %s:"
" %s", disk, node, result.fail_msg)
def CreateDisks(lu, instance, to_skip=None, target_node=None, disks=None):
"""Create all disks for an instance.
This abstracts away some work from AddInstance.
@@ -205,8 +224,12 @@ def CreateDisks(lu, instance, to_skip=None, target_node=None):
@param to_skip: list of indices to skip
@type target_node: string
@param target_node: if passed, overrides the target node for creation
@rtype: boolean
@return: the success of the creation
@type disks: list of L{objects.Disk}
@param disks: the disks to create; if not specified, all the disks of the
instance are created
@return: information about the created disks, to be used to call
L{_UndoCreateDisks}
@raise errors.OpPrereqError: in case of error
"""
info = GetInstanceInfoText(instance)
@@ -217,6 +240,9 @@ def CreateDisks(lu, instance, to_skip=None, target_node=None):
pnode = target_node
all_nodes = [pnode]
if disks is None:
disks = instance.disks
if instance.disk_template in constants.DTS_FILEBASED:
file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)
@@ -225,32 +251,22 @@ def CreateDisks(lu, instance, to_skip=None, target_node=None):
" node %s" % (file_storage_dir, pnode))
disks_created = []
# Note: this needs to be kept in sync with adding of disks in
# LUInstanceSetParams
for idx, device in enumerate(instance.disks):
for idx, device in enumerate(disks):
if to_skip and idx in to_skip:
continue
logging.info("Creating disk %s for instance '%s'", idx, instance.name)
#HARDCODE
for node in all_nodes:
f_create = node == pnode
try:
CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
_CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
disks_created.append((node, device))
except errors.OpExecError:
logging.warning("Creating disk %s for instance '%s' failed",
idx, instance.name)
except errors.DeviceCreationError, e:
logging.warning("Creating disk %s for instance '%s' failed",
idx, instance.name)
disks_created.extend(e.created_devices)
for (node, disk) in disks_created:
lu.cfg.SetDiskID(disk, node)
result = lu.rpc.call_blockdev_remove(node, disk)
if result.fail_msg:
logging.warning("Failed to remove newly-created disk %s on node %s:"
" %s", device, node, result.fail_msg)
_UndoCreateDisks(lu, disks_created)
raise errors.OpExecError(e.message)
return disks_created
def ComputeDiskSizePerVG(disk_template, disks):
@@ -800,7 +816,14 @@ class LUInstanceRecreateDisks(LogicalUnit):
# All touched nodes must be locked
mylocks = self.owned_locks(locking.LEVEL_NODE)
assert mylocks.issuperset(frozenset(instance.all_nodes))
CreateDisks(self, instance, to_skip=to_skip)
new_disks = CreateDisks(self, instance, to_skip=to_skip)
# TODO: Release node locks before wiping, or explain why it's not possible
if self.cfg.GetClusterInfo().prealloc_wipe_disks:
wipedisks = [(idx, disk, 0)
for (idx, disk) in enumerate(instance.disks)
if idx not in to_skip]
WipeOrCleanupDisks(self, instance, disks=wipedisks, cleanup=new_disks)
def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
@@ -990,6 +1013,28 @@ def WipeDisks(lu, instance, disks=None):
" failed", idx, instance.name)
def WipeOrCleanupDisks(lu, instance, disks=None, cleanup=None):
"""Wrapper for L{WipeDisks} that handles errors.
@type lu: L{LogicalUnit}
@param lu: the logical unit on whose behalf we execute
@type instance: L{objects.Instance}
@param instance: the instance whose disks we should wipe
@param disks: see L{WipeDisks}
@param cleanup: the result returned by L{CreateDisks}, used for cleanup in
case of error
@raise errors.OpPrereqError: in case of failure
"""
try:
WipeDisks(lu, instance, disks=disks)
except errors.OpExecError:
logging.warning("Wiping disks for instance '%s' failed",
instance.name)
_UndoCreateDisks(lu, cleanup)
raise
def ExpandCheckDisks(instance, disks):
"""Return the instance disks selected by the disks list
......
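The reworked CreateDisks() also gives callers a single failure contract: anything it created before an error is removed again through _UndoCreateDisks(), and the caller only ever sees an OpExecError. A hedged sketch of that contract from the caller's side (the wrapper itself is illustrative and not part of the patch; the exception class and the logging call mirror the code above):

import logging

from ganeti import errors
from ganeti.cmdlib.instance_storage import CreateDisks

def _CreateDisksOrFailCleanly(lu, instance, disks):
  try:
    # On success: the list of (node, disk) pairs, later usable as the
    # 'cleanup' argument of WipeOrCleanupDisks().
    return CreateDisks(lu, instance, disks=disks)
  except errors.OpExecError:
    # Partial failures (errors.DeviceCreationError) are handled inside
    # CreateDisks(): the devices it managed to create were already removed
    # by _UndoCreateDisks() before the OpExecError was raised, so the
    # caller only has to report the problem.
    logging.warning("Disk creation for instance '%s' failed", instance.name)
    raise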
@@ -692,6 +692,8 @@ def RunInstanceTests():
RunTest(qa_instance.TestInstanceConvertDiskToPlain,
instance, inodes)
RunTest(qa_instance.TestInstanceStartup, instance)
RunTestIf("instance-modify-disks",
qa_instance.TestInstanceModifyDisks, instance)
RunCommonInstanceTests(instance)
if qa_config.TestEnabled("instance-modify-primary"):
othernode = qa_config.AcquireNode()
......
@@ -203,6 +203,7 @@
"instance-migrate": true,
"instance-modify": true,
"instance-modify-primary": true,
"instance-modify-disks": false,
"instance-reboot": true,
"instance-reinstall": true,
"instance-rename": true,
......
@@ -375,6 +375,10 @@ def IsDiskReplacingSupported(instance):
return instance.disk_template == constants.DT_DRBD8
def IsDiskSupported(instance):
return instance.disk_template != constants.DT_DISKLESS
def TestInstanceAddWithPlainDisk(nodes, fail=False):
"""gnt-instance add -t plain"""
if constants.DT_PLAIN in qa_config.GetEnabledDiskTemplates():
@@ -773,6 +777,20 @@ def TestInstanceConvertDiskToPlain(instance, inodes):
"-n", inodes[1].primary, name])
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceModifyDisks(instance):
"""gnt-instance modify --disk"""
if not IsDiskSupported(instance):
print qa_utils.FormatInfo("Instance doesn't support disks, skipping test")
return
size = qa_config.GetDiskOptions()[-1].get("size")
name = instance.name
build_cmd = lambda arg: ["gnt-instance", "modify", "--disk", arg, name]
AssertCommand(build_cmd("add:size=%s" % size))
AssertCommand(build_cmd("remove"))
@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
def TestInstanceGrowDisk(instance):
"""gnt-instance grow-disk"""
@@ -975,7 +993,13 @@ def TestRecreateDisks(instance, inodes, othernodes):
else:
_AssertRecreateDisks(["-n", other_seq], instance)
# Move disks back
_AssertRecreateDisks(["-n", orig_seq], instance, check=False)
_AssertRecreateDisks(["-n", orig_seq], instance)
# Recreate the disks one by one
for idx in range(0, len(qa_config.GetDiskOptions())):
# Only the first call should destroy all the disks
destroy = (idx == 0)
_AssertRecreateDisks(["--disk=%s" % idx], instance, destroy=destroy,
check=False)
# This and InstanceCheck decoration check that the disks are working
AssertCommand(["gnt-instance", "reinstall", "-f", instance.name])
AssertCommand(["gnt-instance", "start", instance.name])
......
@@ -62,6 +62,20 @@ start() {
# Stop the master IP
stop() {
# Check if the master IP address is still configured on this machine
if ! ip addr show dev $MASTER_NETDEV | \
grep -F " $MASTER_IP/$MASTER_NETMASK" >/dev/null 2>&1; then
# Check if the master IP address is configured on a wrong device
if fping -S 127.0.0.1 $MASTER_IP >/dev/null 2>&1; then
echo "Error: master IP address configured on wrong device," \
"can't shut it down." >&2
exit 1
else
echo "Master IP address not configured on this machine. Doing nothing."
exit 0
fi
fi
if ! ip addr del $MASTER_IP/$MASTER_NETMASK dev $MASTER_NETDEV; then
echo "Error during the deactivation of the master IP address" >&2
exit 1
......