diff --git a/Makefile.am b/Makefile.am
index c24d682b21adb6708aa67a9601244292e1085cb3..9e84594cad203a26d089d06e09bf8e70a4f9b76f 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -503,7 +503,7 @@ nodist_sbin_SCRIPTS = \
$(PYTHON_BOOTSTRAP_SBIN) \
daemons/ganeti-cleaner
-dist_tools_PYTHON = \
+python_scripts = \
tools/burnin \
tools/cfgshell \
tools/cfgupgrade \
@@ -515,7 +515,7 @@ dist_tools_PYTHON = \
tools/sanitize-config
dist_tools_SCRIPTS = \
- $(dist_tools_PYTHON) \
+ $(python_scripts) \
tools/kvm-console-wrapper \
tools/xm-console-wrapper
@@ -729,7 +729,7 @@ TESTS_ENVIRONMENT = \
all_python_code = \
$(dist_sbin_SCRIPTS) \
- $(dist_tools_PYTHON) \
+ $(python_scripts) \
$(pkglib_python_scripts) \
$(nodist_pkglib_python_scripts) \
$(python_tests) \
@@ -766,7 +766,7 @@ lint_python_code = \
ganeti \
ganeti/http/server.py \
$(dist_sbin_SCRIPTS) \
- $(dist_tools_PYTHON) \
+ $(python_scripts) \
$(pkglib_python_scripts) \
$(BUILD_BASH_COMPLETION) \
$(DOCPP) \
@@ -776,7 +776,7 @@ pep8_python_code = \
ganeti \
ganeti/http/server.py \
$(dist_sbin_SCRIPTS) \
- $(dist_tools_PYTHON) \
+ $(python_scripts) \
$(pkglib_python_scripts) \
$(BUILD_BASH_COMPLETION) \
$(DOCPP) \
diff --git a/devel/upload.in b/devel/upload.in
index 885a51d220598ae187f8eeed70a977e410020fd9..91682859a91707fdd59acea48ae96025a227219c 100644
--- a/devel/upload.in
+++ b/devel/upload.in
@@ -76,6 +76,9 @@ else
make_args=
fi
+# Make sure that directories will get correct permissions
+umask 0022
+
# install ganeti as a real tree
make $make_args install DESTDIR="$TXD"
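The umask 0022 set above makes everything that "make install" creates under $TXD group/other-readable but not writable, i.e. directories come out as 0755 and plain files as 0644. A minimal Python sketch of the same effect (illustration only, not part of the patch, assuming a POSIX system):

import os
import stat
import tempfile

os.umask(0o022)  # same value the script sets before running "make install"

with tempfile.TemporaryDirectory() as tmp:
    dpath = os.path.join(tmp, "subdir")
    fpath = os.path.join(tmp, "file")
    os.mkdir(dpath)            # requested mode 0o777, masked down to 0o755
    with open(fpath, "w"):     # created with 0o666, masked down to 0o644
        pass
    print(oct(stat.S_IMODE(os.stat(dpath).st_mode)))  # 0o755
    print(oct(stat.S_IMODE(os.stat(fpath).st_mode)))  # 0o644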
diff --git a/lib/client/gnt_node.py b/lib/client/gnt_node.py
index 0fe37c7ad2b3b5264390ebeed6c155f14f0c14db..4858b0854adc91977531935c8016ad29296fda84 100644
--- a/lib/client/gnt_node.py
+++ b/lib/client/gnt_node.py
@@ -1,7 +1,7 @@
#
#
-# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
+# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -865,8 +865,7 @@ commands = {
[FORCE_OPT, IALLOCATOR_OPT, NEW_SECONDARY_OPT, EARLY_RELEASE_OPT,
PRIORITY_OPT, PRIMARY_ONLY_OPT, SECONDARY_ONLY_OPT],
"[-f] {-I <iallocator> | -n <dst>} <node>",
- "Relocate the secondary instances from a node"
- " to other nodes"),
+ "Relocate the primary and/or secondary instances from a node"),
"failover": (
FailoverNode, ARGS_ONE_NODE, [FORCE_OPT, IGNORE_CONSIST_OPT,
IALLOCATOR_OPT, PRIORITY_OPT],
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index ea282119060f34058e91d05e7c2dd6c64f6227ca..c65da1d600320f2e8c262f7c2e59e33bab679677 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -1704,7 +1704,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
# Get instances in node group; this is unsafe and needs verification later
- inst_names = self.cfg.GetNodeGroupInstances(self.group_uuid)
+ inst_names = \
+ self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
self.needed_locks = {
locking.LEVEL_INSTANCE: inst_names,
@@ -1738,7 +1739,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
self.group_info = self.cfg.GetNodeGroup(self.group_uuid)
group_nodes = set(self.group_info.members)
- group_instances = self.cfg.GetNodeGroupInstances(self.group_uuid)
+ group_instances = \
+ self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
unlocked_nodes = \
group_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
@@ -1748,11 +1750,13 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
if unlocked_nodes:
raise errors.OpPrereqError("Missing lock for nodes: %s" %
- utils.CommaJoin(unlocked_nodes))
+ utils.CommaJoin(unlocked_nodes),
+ errors.ECODE_STATE)
if unlocked_instances:
raise errors.OpPrereqError("Missing lock for instances: %s" %
- utils.CommaJoin(unlocked_instances))
+ utils.CommaJoin(unlocked_instances),
+ errors.ECODE_STATE)
self.all_node_info = self.cfg.GetAllNodesInfo()
self.all_inst_info = self.cfg.GetAllInstancesInfo()
@@ -1772,17 +1776,17 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
for inst in self.my_inst_info.values():
if inst.disk_template in constants.DTS_INT_MIRROR:
- group = self.my_node_info[inst.primary_node].group
- for nname in inst.secondary_nodes:
- if self.all_node_info[nname].group != group:
+ for nname in inst.all_nodes:
+ if self.all_node_info[nname].group != self.group_uuid:
extra_lv_nodes.add(nname)
unlocked_lv_nodes = \
extra_lv_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
if unlocked_lv_nodes:
- raise errors.OpPrereqError("these nodes could be locked: %s" %
- utils.CommaJoin(unlocked_lv_nodes))
+ raise errors.OpPrereqError("Missing node locks for LV check: %s" %
+ utils.CommaJoin(unlocked_lv_nodes),
+ errors.ECODE_STATE)
self.extra_lv_nodes = list(extra_lv_nodes)
def _VerifyNode(self, ninfo, nresult):
@@ -2052,7 +2056,8 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
"""
for node, n_img in node_image.items():
- if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
+ if (n_img.offline or n_img.rpc_fail or n_img.lvm_fail or
+ self.all_node_info[node].group != self.group_uuid):
# skip non-healthy nodes
continue
for volume in n_img.volumes:
@@ -2079,11 +2084,11 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
# WARNING: we currently take into account down instances as well
# as up ones, considering that even if they're down someone
# might want to start them even in the event of a node failure.
- if n_img.offline:
- # we're skipping offline nodes from the N+1 warning, since
- # most likely we don't have good memory infromation from them;
- # we already list instances living on such nodes, and that's
- # enough warning
+ if n_img.offline or self.all_node_info[node].group != self.group_uuid:
+ # we're skipping nodes marked offline and nodes in other groups from
+ # the N+1 warning, since most likely we don't have good memory
+ # information from them; we already list instances living on such
+ # nodes, and that's enough warning
continue
for prinode, instances in n_img.sbp.items():
needed_mem = 0
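The cmdlib.py hunks above make per-group verification skip nodes that belong to other node groups, both in the orphan-volume check and in the N+1 memory check, since (as the comment says) data for such nodes is likely incomplete. The N+1 check the comment refers to sums, per secondary node, the memory of the instances that each primary would fail over onto it; a simplified standalone sketch with assumed data shapes (not the actual LUClusterVerifyGroup code):

def verify_n_plus_one(node_free_mem, sec_by_primary, inst_mem):
    # node_free_mem: node name -> free memory in MiB
    # sec_by_primary: secondary node -> {primary node -> [instance names]}
    # inst_mem: instance name -> configured memory in MiB
    warnings = []
    for node, by_primary in sec_by_primary.items():
        for prinode, instances in by_primary.items():
            needed_mem = sum(inst_mem[name] for name in instances)
            if node_free_mem[node] < needed_mem:
                warnings.append(
                    "node %s would lack %d MiB to host the instances of %s"
                    % (node, needed_mem - node_free_mem[node], prinode))
    return warnings

# node2 is the secondary for two instances whose primary is node1, but it
# only has 1024 MiB free while a failover of node1 would need 1280 MiB.
print(verify_n_plus_one(
    {"node2": 1024},
    {"node2": {"node1": ["inst1", "inst2"]}},
    {"inst1": 512, "inst2": 768}))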
diff --git a/lib/opcodes.py b/lib/opcodes.py
index 6a922c9467e51b266cbc356f840fce1083363085..3b348a249a8644968ae79fb3706222cc8a2db29a 100644
--- a/lib/opcodes.py
+++ b/lib/opcodes.py
@@ -811,7 +811,7 @@ class OpQuery(OpCode):
_PUseLocking,
("fields", ht.NoDefault, ht.TListOf(ht.TNonEmptyString),
"Requested fields"),
- ("filter", None, ht.TOr(ht.TNone, ht.TListOf),
+ ("filter", None, ht.TOr(ht.TNone, ht.TList),
"Query filter"),
]
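The opcodes.py hunk replaces ht.TListOf, a factory that takes an item check and returns a check, with ht.TList, which is itself a check. With the bare factory in place the combined check for "filter" would accept almost any value, because calling a factory on a value merely returns a (truthy) function object. A standalone sketch of that pattern, re-implemented here with assumed semantics rather than imported from ganeti.ht:

def TNone(val):
    return val is None

def TList(val):
    return isinstance(val, list)

def TListOf(item_check):
    # factory: builds and returns a check for "list whose items all match"
    return lambda val: isinstance(val, list) and all(item_check(v) for v in val)

def TOr(*checks):
    return lambda val: any(check(val) for check in checks)

buggy_check = TOr(TNone, TListOf)   # old declaration: the factory itself
fixed_check = TOr(TNone, TList)     # new declaration: an actual check

print(buggy_check("not a list"))            # True - anything non-None passes
print(fixed_check("not a list"))            # False
print(fixed_check(["=", "name", "node1"]))  # True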
diff --git a/man/gnt-node.rst b/man/gnt-node.rst
index 7cfe9cecfe43b396a62090197a5b0d994dada728..f94da9909b966f535a420175b713cf61fb156bc1 100644
--- a/man/gnt-node.rst
+++ b/man/gnt-node.rst
@@ -118,7 +118,12 @@ potential recovery).
Note that this command is equivalent to using per-instance commands for
each affected instance individually:
-- ``--primary-only`` is equivalent to ``gnt-instance failover/migration``
+- ``--primary-only`` is equivalent to ``gnt-instance
+ failover/migration`` for non-DRBD instances, but for DRBD instances
+ it behaves differently and is usually a slow process (it will change the
+ primary to another node while keeping the secondary, thus requiring
+ data copies, whereas failover/migrate will only toggle the
+ primary/secondary roles, which is a fast process)
- ``--secondary-only`` is equivalent to ``gnt-instance replace-disks``
in the secondary node change mode (only valid for DRBD instances)
- when neither of the above is done a combination of the two cases is run