Commit 5ad68a23 authored by Michael Hanselmann

Merge branch 'devel-2.5'



* devel-2.5: (29 commits)
  gnt-* {add,list,remove}-tags: Unify options
  Bump version for 2.5.0 final release
  configure.ac: Fix “too many arguments” error
  Fix extra whitespace
  Further fixes concerning drbd port release
  Fix a bug concerning TCP port release
  Fix extra whitespace
  Fix a bug concerning TCP port release
  ganeti.initd: Add “status” action
  Add whitelist for opcodes using BGL
  LUOobCommand: acquire BGL in shared mode
  Fix docstring bug
  LUNodeAdd: Verify version in Prereq
  LUNodeAdd: Verify version in Prereq
  Fix LV status parsing to accept newer LVM
  gnt-instance info: Show node group information
  cmdlib: Factorize checking acquired node group locks
  Bump version for 2.5.0~rc6 release
  cmdlib: Stop forking in LUClusterQuery
  locking: Notify only once on release
  ...

Conflicts:
	NEWS: Trivial
	daemons/daemon-util.in: Copyright line
	lib/client/gnt_group.py: Tag operations
	lib/cmdlib.py: Not so trivial, hopefully correct
Signed-off-by: Michael Hanselmann <hansmi@google.com>
Reviewed-by: Iustin Pop <iustin@google.com>
parents d6f58310 6bc3ed14
@@ -20,10 +20,10 @@ Version 2.6.0 beta1
a future version. :pyeval:`luxi.REQ_QUERY` should be used instead.
Version 2.5.0 rc5
-----------------
Version 2.5.0
-------------
*(Released Mon, 9 Jan 2012)*
*(Released Thu, 12 Apr 2012)*
Incompatible/important changes and bugfixes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -59,6 +59,9 @@ Incompatible/important changes and bugfixes
- Offline nodes are ignored when failing over an instance.
- Support for KVM version 1.0, which changed the version reporting format
from 3 to 2 digits.
- TCP/IP ports used by DRBD disks are returned to a pool upon instance
removal.
- ``Makefile`` is now compatible with Automake 1.11.2
- Includes all bugfixes made in the 2.4 series
New features
@@ -151,6 +154,25 @@ Misc
- A short delay when waiting for job changes reduces the number of LUXI
requests significantly.
- DRBD metadata volumes are overwritten with zeros during disk creation.
- Out-of-band commands no longer acquire the cluster lock in exclusive
mode.
- ``devel/upload`` now uses correct permissions for directories.
Version 2.5.0 rc6
-----------------
*(Released Fri, 23 Mar 2012)*
This was the sixth release candidate of the 2.5 series.
Version 2.5.0 rc5
-----------------
*(Released Mon, 9 Jan 2012)*
This was the fifth release candidate of the 2.5 series.
Version 2.5.0 rc4
......
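The KVM entry above concerns the output of "kvm --version", which dropped the third version component with KVM 1.0. Purely as a standalone illustration (this is not the parser used in hv_kvm.py), a regex that accepts both the old three-digit and the new two-digit format could look like this:

  import re

  # Accepts "0.15.1" as well as "1.0"; the micro component defaults to 0
  # when it is absent.  Hypothetical helper, not Ganeti code.
  _VERSION_RE = re.compile(r"\b(\d+)\.(\d+)(?:\.(\d+))?\b")

  def parse_kvm_version(banner):
      """Return (major, minor, micro) from a KVM version banner."""
      match = _VERSION_RE.search(banner)
      if not match:
          raise ValueError("unrecognised KVM version: %r" % banner)
      major, minor, micro = match.groups()
      return int(major), int(minor), int(micro or 0)

  # Illustrative banner strings only.
  assert parse_kvm_version("QEMU emulator version 0.15.1") == (0, 15, 1)
  assert parse_kvm_version("QEMU emulator version 1.0") == (1, 0, 0)
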
@@ -2,7 +2,7 @@
m4_define([gnt_version_major], [2])
m4_define([gnt_version_minor], [5])
m4_define([gnt_version_revision], [0])
m4_define([gnt_version_suffix], [~rc5])
m4_define([gnt_version_suffix], [])
m4_define([gnt_version_full],
m4_format([%d.%d.%d%s],
gnt_version_major, gnt_version_minor,
......
#!/bin/bash
#
# Copyright (C) 2009, 2011 Google Inc.
# Copyright (C) 2009, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -111,6 +111,30 @@ check_exitcode() {
return 0
}
# Prints path to PID file for a daemon.
daemon_pidfile() {
if [[ "$#" -lt 1 ]]; then
echo 'Missing daemon name.' >&2
return 1
fi
local name="$1"; shift
_daemon_pidfile $name
}
# Prints path to daemon executable.
daemon_executable() {
if [[ "$#" -lt 1 ]]; then
echo 'Missing daemon name.' >&2
return 1
fi
local name="$1"; shift
_daemon_executable $name
}
# Prints a list of all daemons in the order in which they should be started
list_start_daemons() {
local name
......
@@ -75,6 +75,30 @@ stop_all() {
done
}
status_all() {
local daemons="$1" status ret
if [ -z "$daemons" ]; then
daemons=$($DAEMON_UTIL list-start-daemons)
fi
status=0
for i in $daemons; do
if status_of_proc $($DAEMON_UTIL daemon-executable $i) $i; then
ret=0
else
ret=$?
# Use exit code from first failed call
if [ "$status" -eq 0 ]; then
status=$ret
fi
fi
done
exit $status
}
if [ -n "$2" ] && ! errmsg=$($DAEMON_UTIL is-daemon-name "$2" 2>&1); then
log_failure_msg "$errmsg"
exit 1
@@ -94,6 +118,9 @@ case "$1" in
stop_all "$2"
start_all "$2"
;;
status)
status_all "$2"
;;
*)
log_success_msg "Usage: $SCRIPTNAME {start|stop|force-reload|restart}"
exit 1
......
#
#
# Copyright (C) 2006, 2007, 2010, 2011 Google Inc.
# Copyright (C) 2006, 2007, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -618,8 +618,8 @@ class LogicalVolume(BlockDev):
return False
status, major, minor, pe_size, stripes = out
if len(status) != 6:
logging.error("lvs lv_attr is not 6 characters (%s)", status)
if len(status) < 6:
logging.error("lvs lv_attr is not at least 6 characters (%s)", status)
return False
try:
......
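The lv_attr hunk above relaxes an exact-length check because newer LVM releases append additional attribute characters to the field, so requiring exactly six characters started rejecting valid output. A standalone sketch of the tolerant approach, using hypothetical names rather than the real LogicalVolume code:

  def lv_is_virtual(lv_attr):
      """Inspect the lvs lv_attr field, tolerating trailing characters.

      Only the first six characters are interpreted; anything newer LVM
      appends after them is ignored.  Illustration only.
      """
      if len(lv_attr) < 6:
          raise ValueError("lv_attr too short: %r" % lv_attr)
      # Character 0 is the volume type; "v" marks a virtual volume, which
      # can indicate missing physical volumes.
      return lv_attr[0] == "v"

  print(lv_is_virtual("-wi-ao----"))  # ten-character attr from newer LVM
  print(lv_is_virtual("vwi-a-"))      # older six-character format
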
@@ -468,7 +468,7 @@ def AddTags(opts, args):
if not args:
raise errors.OpPrereqError("No tags to be added")
op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
SubmitOpCode(op, opts=opts)
SubmitOrSend(op, opts)
def RemoveTags(opts, args):
@@ -485,7 +485,7 @@ def RemoveTags(opts, args):
if not args:
raise errors.OpPrereqError("No tags to be removed")
op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
SubmitOpCode(op, opts=opts)
SubmitOrSend(op, opts)
def check_unit(option, opt, value): # pylint: disable=W0613
......
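Switching the tag commands from SubmitOpCode to SubmitOrSend is what makes the newly added SUBMIT_OPT (--submit) meaningful: the opcode is either queued and waited for, or only queued, depending on the option. A simplified, self-contained sketch of such a dispatch; the names below are illustrative, not the actual cli.py helpers:

  class FakeJobQueue(object):
      """Stand-in for a job queue client, for illustration only."""

      def submit(self, op):
          print("queued %s" % op)
          return 42  # pretend job ID

      def wait(self, job_id):
          print("waiting for job %s" % job_id)
          return "success"

  def submit_or_send(op, opts, queue):
      """Queue the opcode; wait for the result unless --submit was given."""
      job_id = queue.submit(op)
      if getattr(opts, "submit_only", False):
          # --submit: report the job ID and return without waiting.
          print("Job %s submitted" % job_id)
          return None
      return queue.wait(job_id)
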
@@ -1181,12 +1181,13 @@ def _OobPower(opts, node_list, power):
return True
def _InstanceStart(opts, inst_list, start):
def _InstanceStart(opts, inst_list, start, no_remember=False):
"""Puts the instances in the list to desired state.
@param opts: The command line options selected by the user
@param inst_list: The list of instances to operate on
@param start: True if they should be started, False for shutdown
@param no_remember: If the instance state should be remembered
@return: The success of the operation (none failed)
"""
@@ -1195,7 +1196,8 @@ def _InstanceStart(opts, inst_list, start):
text_submit, text_success, text_failed = ("startup", "started", "starting")
else:
opcls = compat.partial(opcodes.OpInstanceShutdown,
timeout=opts.shutdown_timeout)
timeout=opts.shutdown_timeout,
no_remember=no_remember)
text_submit, text_success, text_failed = ("shutdown", "stopped", "stopping")
jex = JobExecutor(opts=opts)
@@ -1375,7 +1377,7 @@ def _EpoOff(opts, node_list, inst_map):
@return: The desired exit status
"""
if not _InstanceStart(opts, inst_map.keys(), False):
if not _InstanceStart(opts, inst_map.keys(), False, no_remember=True):
ToStderr("Please investigate and stop instances manually before continuing")
return constants.EXIT_FAILURE
@@ -1521,10 +1523,10 @@ commands = {
"list-tags": (
ListTags, ARGS_NONE, [], "", "List the tags of the cluster"),
"add-tags": (
AddTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT],
AddTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT],
"tag...", "Add tags to the cluster"),
"remove-tags": (
RemoveTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT],
RemoveTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT],
"tag...", "Remove tags from the cluster"),
"search-tags": (
SearchTags, [ArgUnknown(min=1, max=1)], [PRIORITY_OPT], "",
......
@@ -310,15 +310,15 @@ commands = {
"[-I <iallocator>] [--to <group>]",
"Evacuate all instances within a group"),
"list-tags": (
ListTags, ARGS_ONE_GROUP, [PRIORITY_OPT],
ListTags, ARGS_ONE_GROUP, [],
"<group_name>", "List the tags of the given group"),
"add-tags": (
AddTags, [ArgGroup(min=1, max=1), ArgUnknown()],
[TAG_SRC_OPT, PRIORITY_OPT],
[TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT],
"<group_name> tag...", "Add tags to the given group"),
"remove-tags": (
RemoveTags, [ArgGroup(min=1, max=1), ArgUnknown()],
[TAG_SRC_OPT, PRIORITY_OPT],
[TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT],
"<group_name> tag...", "Remove tags from the given group"),
}
......
@@ -1203,7 +1203,15 @@ def ShowInstanceConfig(opts, args):
## instance["auto_balance"])
buf.write(" Nodes:\n")
buf.write(" - primary: %s\n" % instance["pnode"])
buf.write(" - secondaries: %s\n" % utils.CommaJoin(instance["snodes"]))
buf.write(" group: %s (UUID %s)\n" %
(instance["pnode_group_name"], instance["pnode_group_uuid"]))
buf.write(" - secondaries: %s\n" %
utils.CommaJoin("%s (group %s, group UUID %s)" %
(name, group_name, group_uuid)
for (name, group_name, group_uuid) in
zip(instance["snodes"],
instance["snodes_group_names"],
instance["snodes_group_uuids"])))
buf.write(" Operating system: %s\n" % instance["os"])
FormatParameterDict(buf, instance["os_instance"], instance["os_actual"],
level=2)
@@ -1638,15 +1646,15 @@ commands = {
[TO_GROUP_OPT, IALLOCATOR_OPT, EARLY_RELEASE_OPT],
"[-I <iallocator>] [--to <group>]", "Change group of instance"),
"list-tags": (
ListTags, ARGS_ONE_INSTANCE, [PRIORITY_OPT],
ListTags, ARGS_ONE_INSTANCE, [],
"<instance_name>", "List the tags of the given instance"),
"add-tags": (
AddTags, [ArgInstance(min=1, max=1), ArgUnknown()],
[TAG_SRC_OPT, PRIORITY_OPT],
[TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT],
"<instance_name> tag...", "Add tags to the given instance"),
"remove-tags": (
RemoveTags, [ArgInstance(min=1, max=1), ArgUnknown()],
[TAG_SRC_OPT, PRIORITY_OPT],
[TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT],
"<instance_name> tag...", "Remove tags from given instance"),
}
......
@@ -974,15 +974,16 @@ commands = {
ListTags, ARGS_ONE_NODE, [],
"<node_name>", "List the tags of the given node"),
"add-tags": (
AddTags, [ArgNode(min=1, max=1), ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT],
AddTags, [ArgNode(min=1, max=1), ArgUnknown()],
[TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT],
"<node_name> tag...", "Add tags to the given node"),
"remove-tags": (
RemoveTags, [ArgNode(min=1, max=1), ArgUnknown()],
[TAG_SRC_OPT, PRIORITY_OPT],
[TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT],
"<node_name> tag...", "Remove tags from the given node"),
"health": (
Health, ARGS_MANY_NODES,
[NOHDR_OPT, SEP_OPT, SUBMIT_OPT, PRIORITY_OPT, OOB_TIMEOUT_OPT],
[NOHDR_OPT, SEP_OPT, PRIORITY_OPT, OOB_TIMEOUT_OPT],
"[<node_name>...]", "List health of node(s) using out-of-band"),
}
......
@@ -32,7 +32,6 @@ import os
import os.path
import time
import re
import platform
import logging
import copy
import OpenSSL
@@ -60,6 +59,7 @@ from ganeti import qlang
from ganeti import opcodes
from ganeti import ht
from ganeti import rpc
from ganeti import runtime
 
import ganeti.masterd.instance # pylint: disable=W0611
 
@@ -596,6 +596,32 @@ def _MakeLegacyNodeInfo(data):
})
 
 
def _CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_nodes,
cur_group_uuid):
"""Checks if node groups for locked instances are still correct.
@type cfg: L{config.ConfigWriter}
@param cfg: Cluster configuration
@type instances: dict; string as key, L{objects.Instance} as value
@param instances: Dictionary, instance name as key, instance object as value
@type owned_groups: iterable of string
@param owned_groups: List of owned groups
@type owned_nodes: iterable of string
@param owned_nodes: List of owned nodes
@type cur_group_uuid: string or None
@param cur_group_uuid: Optional group UUID to check against instance's groups
"""
for (name, inst) in instances.items():
assert owned_nodes.issuperset(inst.all_nodes), \
"Instance %s's nodes changed while we kept the lock" % name
inst_groups = _CheckInstanceNodeGroups(cfg, name, owned_groups)
assert cur_group_uuid is None or cur_group_uuid in inst_groups, \
"Instance %s has no node in group %s" % (name, cur_group_uuid)
def _CheckInstanceNodeGroups(cfg, instance_name, owned_groups):
"""Checks if the owned node groups are still correct for an instance.
 
@@ -1885,7 +1911,7 @@ class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
"""Verifies the cluster config.
 
"""
REQ_BGL = True
REQ_BGL = False
 
def _VerifyHVP(self, hvp_data):
"""Verifies locally the syntax of the hypervisor parameters.
@@ -1902,13 +1928,17 @@ class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
self._ErrorIf(True, constants.CV_ECLUSTERCFG, None, msg % str(err))
 
def ExpandNames(self):
# Information can be safely retrieved as the BGL is acquired in exclusive
# mode
assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
self.needed_locks = dict.fromkeys(locking.LEVELS, locking.ALL_SET)
self.share_locks = _ShareAll()
def CheckPrereq(self):
"""Check prerequisites.
"""
# Retrieve all information
self.all_group_info = self.cfg.GetAllNodeGroupsInfo()
self.all_node_info = self.cfg.GetAllNodesInfo()
self.all_inst_info = self.cfg.GetAllInstancesInfo()
self.needed_locks = {}
 
def Exec(self, feedback_fn):
"""Verify integrity of cluster, performing various test on nodes.
@@ -3499,15 +3529,8 @@ class LUGroupVerifyDisks(NoHooksLU):
self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
 
# Check if node groups for locked instances are still correct
for (instance_name, inst) in self.instances.items():
assert owned_nodes.issuperset(inst.all_nodes), \
"Instance %s's nodes changed while we kept the lock" % instance_name
inst_groups = _CheckInstanceNodeGroups(self.cfg, instance_name,
owned_groups)
assert self.group_uuid in inst_groups, \
"Instance %s has no node in group %s" % (instance_name, self.group_uuid)
_CheckInstancesNodeGroups(self.cfg, self.instances,
owned_groups, owned_nodes, self.group_uuid)
 
def Exec(self, feedback_fn):
"""Verify integrity of cluster disks.
@@ -4512,7 +4535,7 @@ class LUOobCommand(NoHooksLU):
"""Logical unit for OOB handling.
 
"""
REG_BGL = False
REQ_BGL = False
_SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)
 
def ExpandNames(self):
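In the LUClusterQuery hunk below, platform.architecture() is replaced by runtime.GetArchInfo(). platform.architecture() may invoke the external "file" utility on the interpreter binary to determine its bitness, which is the forking that the "cmdlib: Stop forking in LUClusterQuery" commit avoids. A fork-free way of obtaining roughly the same information, shown only as an illustration and not as the body of GetArchInfo:

  import platform
  import sys

  def arch_info():
      """Return (bitness, machine) without spawning helper processes.

      sys.maxsize reflects the pointer width of the running interpreter and
      platform.machine() only consults uname(), so neither forks.
      """
      bits = "64bit" if sys.maxsize > 2 ** 32 else "32bit"
      return (bits, platform.machine())

  print(arch_info())  # e.g. ('64bit', 'x86_64')
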
@@ -6096,7 +6119,7 @@ class LUClusterQuery(NoHooksLU):
"config_version": constants.CONFIG_VERSION,
"os_api_version": max(constants.OS_API_VERSIONS),
"export_version": constants.EXPORT_VERSION,
"architecture": (platform.architecture()[0], platform.machine()),
"architecture": runtime.GetArchInfo(),
"name": cluster.cluster_name,
"master": cluster.master_node,
"default_hypervisor": cluster.primary_hypervisor,
@@ -7319,7 +7342,7 @@ def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
"""
logging.info("Removing block devices for instance %s", instance.name)
 
if not _RemoveDisks(lu, instance):
if not _RemoveDisks(lu, instance, ignore_failures=ignore_failures):
if not ignore_failures:
raise errors.OpExecError("Can't remove instance's disks")
feedback_fn("Warning: can't remove instance's disks")
@@ -8934,7 +8957,7 @@ def _CreateDisks(lu, instance, to_skip=None, target_node=None):
_CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
 
 
def _RemoveDisks(lu, instance, target_node=None):
def _RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
"""Remove all disks for an instance.
 
This abstracts away some work from `AddInstance()` and
@@ -8955,6 +8978,7 @@ def _RemoveDisks(lu, instance, target_node=None):
logging.info("Removing block devices for instance %s", instance.name)
 
all_result = True
ports_to_release = set()
for (idx, device) in enumerate(instance.disks):
if target_node:
edata = [(target_node, device)]
@@ -8970,8 +8994,11 @@ def _RemoveDisks(lu, instance, target_node=None):
 
# if this is a DRBD disk, return its port to the pool
if device.dev_type in constants.LDS_DRBD:
tcp_port = device.logical_id[2]
lu.cfg.AddTcpUdpPort(tcp_port)
ports_to_release.add(device.logical_id[2])
if all_result or ignore_failures:
for port in ports_to_release:
lu.cfg.AddTcpUdpPort(port)
 
if instance.disk_template == constants.DT_FILE:
file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
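The ports_to_release set above defers returning DRBD ports to the pool until every removal has succeeded, or failures are explicitly ignored; handing a port back while a half-removed disk may still be using it would let another instance claim it. A stripped-down sketch of that ordering, with a plain set standing in for the configuration's port pool:

  def remove_disks(disks, remove_fn, port_pool, ignore_failures=False):
      """Remove disks, releasing DRBD ports only once it is safe.

      remove_fn(disk) -> bool performs the removal; port_pool is any
      set-like object collecting free ports.  Illustration only.
      """
      all_ok = True
      ports_to_release = set()
      for disk in disks:
          if not remove_fn(disk):
              all_ok = False
          if disk.get("drbd_port") is not None:
              ports_to_release.add(disk["drbd_port"])
      # Only hand the ports back when no failed removal may still hold them
      # (or when the caller asked to ignore failures, as on forced removal).
      if all_ok or ignore_failures:
          port_pool.update(ports_to_release)
      return all_ok

  pool = set()
  remove_disks([{"drbd_port": 11000}, {"drbd_port": 11001}],
               lambda disk: True, pool)
  print(sorted(pool))  # [11000, 11001]
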
@@ -11656,12 +11683,25 @@ class LUInstanceQueryData(NoHooksLU):
else:
self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
 
self.needed_locks[locking.LEVEL_NODEGROUP] = []
self.needed_locks[locking.LEVEL_NODE] = []
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
 
def DeclareLocks(self, level):
if self.op.use_locking and level == locking.LEVEL_NODE:
self._LockInstancesNodes()
if self.op.use_locking:
if level == locking.LEVEL_NODEGROUP:
owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
# Lock all groups used by instances optimistically; this requires going
# via the node before it's locked, requiring verification later on
self.needed_locks[locking.LEVEL_NODEGROUP] = \
frozenset(group_uuid
for instance_name in owned_instances
for group_uuid in
self.cfg.GetInstanceNodeGroups(instance_name))
elif level == locking.LEVEL_NODE:
self._LockInstancesNodes()
 
def CheckPrereq(self):
"""Check prerequisites.
@@ -11669,12 +11709,23 @@ class LUInstanceQueryData(NoHooksLU):
This only checks the optional instance list against the existing names.
 
"""
owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
if self.wanted_names is None:
assert self.op.use_locking, "Locking was not used"
self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)
self.wanted_names = owned_instances
 
self.wanted_instances = \
map(compat.snd, self.cfg.GetMultiInstanceInfo(self.wanted_names))
instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))
if self.op.use_locking:
_CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
None)
else:
assert not (owned_instances or owned_groups or owned_nodes)
self.wanted_instances = instances.values()
 
def _ComputeBlockdevStatus(self, node, instance_name, dev):
"""Returns the status of a block device
@@ -11739,9 +11790,17 @@ class LUInstanceQueryData(NoHooksLU):
 
cluster = self.cfg.GetClusterInfo()
 
pri_nodes = self.cfg.GetMultiNodeInfo(i.primary_node
for i in self.wanted_instances)
for instance, (_, pnode) in zip(self.wanted_instances, pri_nodes):
node_names = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
nodes = dict(self.cfg.GetMultiNodeInfo(node_names))
groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
for node in nodes.values()))
group2name_fn = lambda uuid: groups[uuid].name
for instance in self.wanted_instances:
pnode = nodes[instance.primary_node]
if self.op.static or pnode.offline:
remote_state = None
if pnode.offline:
@@ -11765,12 +11824,19 @@ class LUInstanceQueryData(NoHooksLU):
disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
instance.disks)
 
snodes_group_uuids = [nodes[snode_name].group
for snode_name in instance.secondary_nodes]
result[instance.name] = {
"name": instance.name,
"config_state": instance.admin_state,
"run_state": remote_state,
"pnode": instance.primary_node,
"pnode_group_uuid": pnode.group,
"pnode_group_name": group2name_fn(pnode.group),
"snodes": instance.secondary_nodes,
"snodes_group_uuids": snodes_group_uuids,
"snodes_group_names": map(group2name_fn, snodes_group_uuids),
"os": instance.os,
# this happens to be the same format used for hooks
"nics": _NICListToTuple(self, instance.nics),
@@ -12564,6 +12630,12 @@ class LUInstanceSetParams(LogicalUnit):
child.size = parent.size
child.mode = parent.mode
 
# this is a DRBD disk, return its port to the pool
# NOTE: this must be done right before the call to cfg.Update!
for disk in old_disks:
tcp_port = disk.logical_id[2]
self.cfg.AddTcpUdpPort(tcp_port)
# update instance structure
instance.disks = new_disks
instance.disk_template = constants.DT_PLAIN
@@ -12589,13 +12661,6 @@ class LUInstanceSetParams(LogicalUnit):
self.LogWarning("Could not remove metadata for disk %d on node %s,"
" continuing anyway: %s", idx, pnode, msg)
 
# this is a DRBD disk, return its port to the pool
for disk in old_disks:
tcp_port = disk.logical_id[2]
self.cfg.AddTcpUdpPort(tcp_port)
# Node resource locks will be released by caller
def _CreateNewDisk(self, idx, params, _):
"""Creates a new disk.
 
@@ -12890,7 +12955,7 @@ class LUInstanceChangeGroup(LogicalUnit):
 
if self.req_target_uuids:
# User requested specific target groups
self.target_uuids = self.req_target_uuids
self.target_uuids = frozenset(self.req_target_uuids)
else:
# All groups except those used by the instance are potential targets
self.target_uuids = owned_groups - inst_groups
@@ -14081,16 +14146,8 @@ class LUGroupEvacuate(LogicalUnit):
self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
 
# Check if node groups for locked instances are still correct
for instance_name in owned_instances:
inst = self.instances[instance_name]
assert owned_nodes.issuperset(inst.all_nodes), \
"Instance %s's nodes changed while we kept the lock" % instance_name
inst_groups = _CheckInstanceNodeGroups(self.cfg, instance_name,
owned_groups)
assert self.group_uuid in inst_groups, \
"Instance %s has no node in group %s" % (instance_name, self.group_uuid)
_CheckInstancesNodeGroups(self.cfg, self.instances,
owned_groups, owned_nodes, self.group_uuid)
 
if self.req_target_uuids:
# User requested specific target groups
......
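Several hunks above share one pattern: node group locks are computed optimistically from the configuration before the groups are actually locked, and _CheckInstancesNodeGroups re-validates the assignment once all locks are held, since instances may have moved in the meantime. A self-contained sketch of that final check, using simplified data structures instead of the cmdlib helpers:

  def check_instances_node_groups(instances, node_to_group,
                                  owned_groups, owned_nodes,
                                  cur_group_uuid=None):
      """Re-validate optimistically computed locks after acquisition.

      instances maps name -> list of node names, node_to_group maps node
      name -> group UUID.  Raises AssertionError if the cluster changed
      while the locks were being acquired.  Illustration only.
      """
      for name, nodes in instances.items():
          assert owned_nodes.issuperset(nodes), \
              "Instance %s's nodes changed while we kept the lock" % name
          inst_groups = set(node_to_group[node] for node in nodes)
          assert inst_groups.issubset(owned_groups), \
              "Instance %s uses a node group that was not locked" % name
          assert cur_group_uuid is None or cur_group_uuid in inst_groups, \
              "Instance %s has no node in group %s" % (name, cur_group_uuid)

  check_instances_node_groups({"web1": ["node1", "node2"]},
                              {"node1": "g-1", "node2": "g-2"},
                              owned_groups={"g-1", "g-2"},
                              owned_nodes={"node1", "node2"})
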
@@ -723,12 +723,15 @@ class ConfigWriter:
def AddTcpUdpPort(self, port):
"""Adds a new port to the available port pool.
@warning: this method does not "flush" the configuration (via
L{_WriteConfig}); callers should do that themselves once the
configuration is stable
"""
if not isinstance(port, int):
raise errors.ProgrammerError("Invalid type passed for port")
self._config_data.cluster.tcpudp_port_pool.add(port)
self._WriteConfig()
@locking.ssynchronized(_config_lock, shared=1)
def GetPortList(self):
@@ -1182,6 +1185,17 @@ class ConfigWriter:
for member_name in
self._UnlockedGetNodeGroup(ngfn(node_name)).members)
@locking.ssynchronized(_config_lock, shared=1)
def GetMultiNodeGroupInfo(self, group_uuids):
"""Get the configuration of multiple node groups.
@param group_uuids: List of node group UUIDs
@rtype: list
@return: List of tuples of (group_uuid, group_info)
"""
return [(uuid, self._UnlockedGetNodeGroup(uuid)) for uuid in group_uuids]
@locking.ssynchronized(_config_lock)
def AddInstance(self, instance, ec_id):
"""Add an instance to the config.
......
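The warning added to AddTcpUdpPort above shifts responsibility for persisting the configuration to the caller, so freed ports are only written out as part of the caller's own configuration update, once the surrounding operation is known to be stable. A toy sketch of that split between in-memory mutation and an explicit flush (hypothetical class and path, not the ConfigWriter API):

  class PortPool(object):
      """In-memory pool of free ports with an explicit persistence step."""

      def __init__(self, path):
          self._path = path
          self._ports = set()

      def add(self, port):
          # Mutates only the in-memory state; callers flush() once the
          # configuration is stable.
          if not isinstance(port, int):
              raise TypeError("Invalid type passed for port")
          self._ports.add(port)

      def flush(self):
          with open(self._path, "w") as fd:
              fd.write("\n".join(str(p) for p in sorted(self._ports)))

  pool = PortPool("/tmp/free-ports")  # illustrative path
  for port in (11000, 11001, 11002):
      pool.add(port)
  pool.flush()  # one write for all three ports
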
@@ -1078,8 +1078,20 @@ class KVMHypervisor(hv_base.BaseHypervisor):
if mem_path:
kvm_cmd.extend(["-mem-path", mem_path, "-mem-prealloc"])
monitor_dev = ("unix:%s,server,nowait" %
self._InstanceMonitor(instance.name))
kvm_cmd.extend(["-monitor", monitor_dev])