Commit 0eab2c0d authored by Klaus Aehlig

Merge branch 'stable-2.11' into stable-2.12

* stable-2.11
  Fix invalid use of RpcResult.Raise
  Bump revision to 2.11.5
  Prepare NEWS file for 2.11.5 release

* stable-2.10
  On upgrades, check for upgrades to resume first
  Pause watcher during upgrade
  Allow instance disks to be added with --no-wait-for-sync
  Bump revision to 2.10.7
  Prepare NEWS file for 2.10.7 release
  Fix lint error
  Create the config backup archive in a safe way

Conflicts:
	NEWS: take ALL the entries
	configure.ac: ignore revision bump
Signed-off-by: Klaus Aehlig <aehlig@google.com>
Reviewed-by: Petr Pudlak <pudlak@google.com>
parents 3077d76e 7bcc5fc0
@@ -72,6 +72,49 @@ before rc1:
unresponsive
Version 2.11.5
--------------

*(Released Thu, 7 Aug 2014)*

Inherited from the 2.10 branch:

Important security release. In 2.10.0, the
'gnt-cluster upgrade' command was introduced. Before
performing an upgrade, the configuration directory of
the cluster is backed up. Unfortunately, the archive was
written with permissions that make it possible for
non-privileged users to read the archive and thus have
access to cluster and RAPI keys. After this release,
the archive will be created with privileged access only.
We strongly advise you to restrict the permissions of
previously created archives. The archives are found in
/var/lib/ganeti*.tar (unless otherwise configured with
--localstatedir or --with-backup-dir).
If you suspect that non-privileged users have accessed
your archives already, we advise you to renew the
cluster's crypto keys using 'gnt-cluster renew-crypto'
and to reset the RAPI credentials by editing
/var/lib/ganeti/rapi_users (or the corresponding file
under a different path, if configured with
--localstatedir).
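
For administrators who want to script this cleanup, a minimal sketch
along the following lines could be used to restrict previously created
archives (the glob pattern matches the default location mentioned above
and is an assumption; adjust it for a non-default --localstatedir or
--with-backup-dir)::

  # Illustrative only: make old configuration backups readable by root only.
  import glob
  import os

  for archive in glob.glob("/var/lib/ganeti*.tar"):
    os.chmod(archive, 0o600)  # owner read/write only
    os.chown(archive, 0, 0)   # root:root
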

Other changes included in this release:

- Fix handling of Xen instance states.
- Fix NIC configuration with absent NIC VLAN
- Adapt relative path expansion in PATH to new environment
- Exclude archived jobs from configuration backups
- Fix RAPI for split query setup
- Allow disk hot-remove even with chroot or SM

Inherited from the 2.9 branch:

- Make htools tolerate missing 'spfree' on luxi

Version 2.11.4
--------------
@@ -358,6 +401,47 @@ This was the first beta release of the 2.11 series. All important changes
are listed in the latest 2.11 entry.
Version 2.10.7
--------------

*(Released Thu, 7 Aug 2014)*

Important security release. In 2.10.0, the
'gnt-cluster upgrade' command was introduced. Before
performing an upgrade, the configuration directory of
the cluster is backed up. Unfortunately, the archive was
written with permissions that make it possible for
non-privileged users to read the archive and thus have
access to cluster and RAPI keys. After this release,
the archive will be created with privileged access only.
We strongly advise you to restrict the permissions of
previously created archives. The archives are found in
/var/lib/ganeti*.tar (unless otherwise configured with
--localstatedir or --with-backup-dir).
If you suspect that non-privileged users have accessed
your archives already, we advise you to renew the
cluster's crypto keys using 'gnt-cluster renew-crypto'
and to reset the RAPI credentials by editing
/var/lib/ganeti/rapi_users (or the corresponding file
under a different path, if configured with
--localstatedir).

Other changes included in this release:

- Fix handling of Xen instance states.
- Fix NIC configuration with absent NIC VLAN
- Adapt relative path expansion in PATH to new environment
- Exclude archived jobs from configuration backups
- Fix RAPI for split query setup
- Allow disk hot-remove even with chroot or SM

Inherited from the 2.9 branch:

- Make htools tolerate missing 'spfree' on luxi

Version 2.10.6
--------------
@@ -30,6 +30,7 @@ from cStringIO import StringIO
import os
import time
import OpenSSL
import tempfile
import itertools
from ganeti.cli import *
@@ -1917,6 +1918,10 @@ def _UpgradeBeforeConfigurationChange(versionstring):
ToStderr("Failed to completely empty the queue.")
return (False, rollback)
ToStdout("Pausing the watcher for one hour.")
rollback.append(lambda: GetClient().SetWatcherPause(None))
GetClient().SetWatcherPause(time.time() + 60 * 60)
ToStdout("Stopping daemons on master node.")
if not _RunCommandAndReport([pathutils.DAEMON_UTIL, "stop-all"]):
return (False, rollback)
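
The rollback list built up here follows a simple undo-stack pattern: every
step that changes cluster state first registers a callable that reverses it,
and on failure the caller can run the collected callables in reverse order.
A self-contained sketch of that pattern (the helper below is illustrative,
not part of gnt_cluster.py):

  # Minimal sketch of an undo stack, assuming each step is a (do, undo) pair.
  def run_with_rollback(steps):
    rollback = []
    try:
      for do, undo in steps:
        do()
        rollback.append(undo)
    except Exception:
      # Undo completed steps in reverse order, then re-raise.
      for undo in reversed(rollback):
        undo()
      raise
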
@@ -1936,11 +1941,16 @@ def _UpgradeBeforeConfigurationChange(versionstring):
ToStdout("Backing up configuration as %s" % backuptar)
if not _RunCommandAndReport(["mkdir", "-p", pathutils.BACKUP_DIR]):
return (False, rollback)
if not _RunCommandAndReport(["tar", "-cf", backuptar,
# Create the archive in a safe manner, as it contains sensitive
# information.
(_, tmp_name) = tempfile.mkstemp(prefix=backuptar, dir=pathutils.BACKUP_DIR)
if not _RunCommandAndReport(["tar", "-cf", tmp_name,
"--exclude=queue/archive",
pathutils.DATA_DIR]):
return (False, rollback)
os.rename(tmp_name, backuptar)
return (True, rollback)
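
The temporary file returned by tempfile.mkstemp is created with mode 0600, so
the tar archive is never readable by unprivileged users while it is being
written, and the final os.rename within the same directory atomically gives it
its intended name. A standalone sketch of the same create-private-then-rename
idiom (function and file names here are illustrative only):

  # Illustrative sketch, not the actual upgrade code.
  import os
  import tempfile

  def write_private_file(final_path, data):
    # mkstemp creates the file with permissions 0600 for the current user.
    fd, tmp_path = tempfile.mkstemp(dir=os.path.dirname(final_path))
    try:
      os.write(fd, data)  # data is expected to be a byte string
    finally:
      os.close(fd)
    # Renaming within the same directory is atomic, so other processes see
    # either the old file or the complete new one.
    os.rename(tmp_path, final_path)
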
@@ -2063,6 +2073,10 @@ def _UpgradeAfterConfigurationChange(oldversion):
  if not _RunCommandAndReport([pathutils.POST_UPGRADE, oldversion]):
    returnvalue = 1

  ToStdout("Unpausing the watcher.")
  if not _RunCommandAndReport(["gnt-cluster", "watcher", "continue"]):
    returnvalue = 1

  ToStdout("Verifying cluster.")
  if not _RunCommandAndReport(["gnt-cluster", "verify"]):
    returnvalue = 1
@@ -2086,6 +2100,22 @@ def UpgradeGanetiCommand(opts, args):
" has to be given")
return 1
# If we're not told to resume, verify there is no upgrade
# in progress.
if not opts.resume:
oldversion, versionstring = _ReadIntentToUpgrade()
if versionstring is not None:
# An upgrade is going on; verify whether the target matches
if versionstring == opts.to:
ToStderr("An upgrade is already in progress. Target version matches,"
" resuming.")
opts.resume = True
opts.to = None
else:
ToStderr("An upgrade from %s to %s is in progress; use --resume to"
" finish it first" % (oldversion, versionstring))
return 1
oldversion = constants.RELEASE_VERSION
if opts.resume:
@@ -3128,11 +3128,11 @@ class LUInstanceSetParams(LogicalUnit):
constants.DT_EXT),
errors.ECODE_INVAL)
if not self.op.wait_for_sync and self.instance.disks_active:
if not self.op.wait_for_sync and not self.instance.disks_active:
for mod in self.diskmod:
if mod[0] == constants.DDM_ADD:
raise errors.OpPrereqError("Can't add a disk to an instance with"
" activated disks and"
" deactivated disks and"
" --no-wait-for-sync given.",
errors.ECODE_INVAL)
@@ -177,7 +177,7 @@ class LUInstanceStartup(LogicalUnit):
self.instance.primary_node,
self.instance,
self.op.shutdown_timeout, self.op.reason)
result.Raise("Could not shutdown instance '%s'", self.instance.name)
result.Raise("Could not shutdown instance '%s'" % self.instance.name)
ShutdownInstanceDisks(self, self.instance)
@@ -191,7 +191,7 @@ class LUInstanceStartup(LogicalUnit):
self.op.startup_paused, self.op.reason)
if result.fail_msg:
ShutdownInstanceDisks(self, self.instance)
result.Raise("Could not start instance '%s'", self.instance.name)
result.Raise("Could not start instance '%s'" % self.instance.name)
class LUInstanceShutdown(LogicalUnit):
@@ -296,7 +296,7 @@ class LUInstanceShutdown(LogicalUnit):
self.instance.primary_node,
self.instance,
self.op.timeout, self.op.reason)
result.Raise("Could not shutdown instance '%s'", self.instance.name)
result.Raise("Could not shutdown instance '%s'" % self.instance.name)
ShutdownInstanceDisks(self, self.instance)
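
These three calls are the invalid use of RpcResult.Raise mentioned in the
commit message: Raise expects an already formatted message and, unlike the
logging helpers, does not apply '%' formatting to extra positional arguments,
so the instance name has to be interpolated before the call. A simplified
stand-in illustrating the difference (this class is an illustration, not
Ganeti's real RpcResult):

  class FakeResult(object):
    def __init__(self, fail_msg=None):
      self.fail_msg = fail_msg

    def Raise(self, msg):
      # Takes the final message only; no '%' interpolation happens here.
      if self.fail_msg:
        raise RuntimeError("%s: %s" % (msg, self.fail_msg))

  result = FakeResult(fail_msg="node unreachable")
  name = "instance1"
  # Logging-style calls such as Raise("Could not shutdown instance '%s'", name)
  # rely on formatting that Raise does not perform; format the message up front.
  try:
    result.Raise("Could not shutdown instance '%s'" % name)
  except RuntimeError as err:
    print(err)  # Could not shutdown instance 'instance1': node unreachable
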
@@ -2214,9 +2214,8 @@ class TestLUInstanceSetParams(CmdlibTestCase):
constants.IDISK_SIZE: 1024
}]],
wait_for_sync=False)
self.ExecOpCodeExpectOpPrereqError(
op, "Can't add a disk to an instance with activated disks"
" and --no-wait-for-sync given.")
self.ExecOpCode(op)
self.assertFalse(self.rpc.call_blockdev_shutdown.called)
def testAddDiskDownInstance(self):
op = self.CopyOpCode(self.op,
@@ -2228,6 +2227,17 @@ class TestLUInstanceSetParams(CmdlibTestCase):
self.assertTrue(self.rpc.call_blockdev_shutdown.called)
def testAddDiskDownInstanceNoWaitForSync(self):
op = self.CopyOpCode(self.op,
disks=[[constants.DDM_ADD, -1,
{
constants.IDISK_SIZE: 1024
}]],
wait_for_sync=False)
self.ExecOpCodeExpectOpPrereqError(
op, "Can't add a disk to an instance with deactivated disks"
" and --no-wait-for-sync given.")
def testAddDiskRunningInstance(self):
op = self.CopyOpCode(self.running_op,
disks=[[constants.DDM_ADD, -1,