Commit 1d328c1c authored by Petr Pudlak

Merge branch 'stable-2.11' into stable-2.12

* stable-2.11
  Bump revision to 2.11.2
  Prepare NEWS file for 2.11.2 release
  Document '--user-shutdown' cluster parameter
  Consider 'Cluster.enabled_user_shutdown' in instance queries
  Extend QA with cluster wide user shutdown tests
  Modify how the KVM daemon is started/stopped
  RPC to start/stop a daemon on a node
  KVM daemon decides if it should run
  Add hypervisor list, user shutdown, and vm capable to Ssconf
  Add 'enabled_user_shutdown' cluster field
  Improve Python conditionals 'is None' and 'is not None'
  Reuse existing helper function to stop a daemon
  Remove unused local variable
  Fix docstring and relax type constraints
  Fix docstrings for hvparams
  Fix docstring
  Improve python dict check
  Fix instance user shutdown QA

* stable-2.10
  Remove 'physical_id' from testing data
  Support disk hotplug with userspace access
  Check for SSL encoding inconsistencies

Conflicts:
	NEWS
	configure.ac
	lib/bootstrap.py
	lib/client/gnt_cluster.py
	lib/cmdlib/cluster.py
	lib/cmdlib/common.py
	lib/objects.py
	man/gnt-cluster.rst
	qa/ganeti-qa.py
	src/Ganeti/Objects.hs
	src/Ganeti/OpCodes.hs
	src/Ganeti/Query/Server.hs
	src/Ganeti/Ssconf.hs
	test/data/instance-prim-sec.txt
	test/hs/Test/Ganeti/OpCodes.hs
	test/hs/Test/Ganeti/Ssconf.hs
	test/py/cfgupgrade_unittest.py
	test/py/daemon-util_unittest.bash
	tools/cfgupgrade
Resolutions:
  - Merge newly added configuration options in 2.11 and 2.12
  - Include KVMD changes from 2.11
  - Update the testing 2.11 configuration for up/downgrades
Signed-off-by: Petr Pudlak <pudlak@google.com>
Reviewed-by: Klaus Aehlig <aehlig@google.com>
parents 10e7e07e f2609511
......@@ -39,6 +39,33 @@ New features
user-specified ones.
Version 2.11.2
--------------
*(Released Fri, 13 Jun 2014)*
- Improvements to KVM with respect to the kvmd and instance shutdown
behavior.
WARNING: In contrast to our standard policy, this bug fix update
introduces new parameters to the configuration. This means in
particular that after an upgrade from 2.11.0 or 2.11.1, 'cfgupgrade'
needs to be run, either manually or explicitly by running
'gnt-cluster upgrade --to 2.11.2' (which requires that the cluster
was configured with --enable-full-version).
This also means that it is not easily possible to downgrade from
2.11.2 to 2.11.1 or 2.11.0. The only way is to go back to 2.10 and
then upgrade again.
Inherited from the 2.10 branch:
- Check for SSL encoding inconsistencies
- Check drbd helper only in VM capable nodes
- Improvements in statistics utils
Inherited from the 2.9 branch:
- check-man-warnings: use C.UTF-8 and set LC_ALL
Version 2.11.1
--------------
......
......@@ -32,6 +32,7 @@ DAEMONS=(
ganeti-wconfd
ganeti-rapi
ganeti-luxid
ganeti-kvmd
)
# This is the list of daemons that are loaded on demand; they should only be
......
......@@ -565,10 +565,8 @@ def LeaveCluster(modify_ssh_setup):
except: # pylint: disable=W0702
logging.exception("Error while removing cluster secrets")
result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop", constants.CONFD])
if result.failed:
logging.error("Command %s failed with exitcode %s and error %s",
result.cmd, result.exit_code, result.output)
utils.StopDaemon(constants.CONFD)
utils.StopDaemon(constants.KVMD)
# Raise a custom exception (handled in ganeti-noded)
raise errors.QuitGanetiException(True, "Shutdown scheduled")
......@@ -1259,6 +1257,32 @@ def GetCryptoTokens(token_requests):
return tokens
def EnsureDaemon(daemon_name, run):
"""Ensures the given daemon is running or stopped.
@type daemon_name: string
@param daemon_name: name of the daemon (e.g., constants.KVMD)
@type run: bool
@param run: whether to start or stop the daemon
@rtype: bool
@return: 'True' if daemon successfully started/stopped,
'False' otherwise
"""
allowed_daemons = [constants.KVMD]
if daemon_name not in allowed_daemons:
fn = lambda _: False
elif run:
fn = utils.EnsureDaemon
else:
fn = utils.StopDaemon
return fn(daemon_name)
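The whitelist-plus-dispatch idiom above is the node-side half of the new
"RPC to start/stop a daemon on a node"; the master-side caller appears
later in this diff as call_node_ensure_daemon. A standalone sketch of the
same pattern, with stand-in start/stop callables in place of
utils.EnsureDaemon and utils.StopDaemon:

# Sketch only: the daemon name mirrors constants.KVMD, and the
# start/stop callables are stand-ins for utils.EnsureDaemon/StopDaemon.
KVMD = "ganeti-kvmd"

def ensure_daemon(daemon_name, run, allowed=(KVMD,),
                  start_fn=lambda name: True,
                  stop_fn=lambda name: True):
  if daemon_name not in allowed:
    fn = lambda _: False  # unknown daemon: refuse rather than raise
  elif run:
    fn = start_fn
  else:
    fn = stop_fn
  return fn(daemon_name)

assert ensure_daemon(KVMD, True)        # whitelisted: start succeeds
assert not ensure_daemon("sshd", True)  # not whitelisted: rejected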
def GetBlockDevSizes(devices):
"""Return the size of the given block devices
......@@ -2143,8 +2167,8 @@ def HotplugDevice(instance, action, dev_type, device, extra, seq):
@param dev_type: the device type to hotplug
@type device: either L{objects.NIC} or L{objects.Disk}
@param device: the device object to hotplug
@type extra: string
@param extra: extra info used by hotplug code (e.g. disk link)
@type extra: tuple
@param extra: extra info used for disk hotplug (disk link, drive uri)
@type seq: int
@param seq: the index of the device from master perspective
@raise RPCFail: in case instance does not have KVM hypervisor
......@@ -2581,7 +2605,7 @@ def _RecursiveAssembleBD(disk, owner, as_primary):
return result
def BlockdevAssemble(disk, owner, as_primary, idx):
def BlockdevAssemble(disk, instance, as_primary, idx):
"""Activate a block device for an instance.
This is a wrapper over _RecursiveAssembleBD.
......@@ -2592,13 +2616,15 @@ def BlockdevAssemble(disk, owner, as_primary, idx):
"""
try:
result = _RecursiveAssembleBD(disk, owner, as_primary)
result = _RecursiveAssembleBD(disk, instance.name, as_primary)
if isinstance(result, BlockDev):
# pylint: disable=E1103
dev_path = result.dev_path
link_name = None
uri = None
if as_primary:
link_name = _SymlinkBlockDev(owner, dev_path, idx)
link_name = _SymlinkBlockDev(instance.name, dev_path, idx)
uri = _CalculateDeviceURI(instance, disk, result)
elif result:
return result, result
else:
......@@ -2608,7 +2634,7 @@ def BlockdevAssemble(disk, owner, as_primary, idx):
except OSError, err:
_Fail("Error while symlinking disk: %s", err, exc=True)
return dev_path, link_name
return dev_path, link_name, uri
def BlockdevShutdown(disk):
......
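Note the widened contract above: BlockdevAssemble now returns a 3-tuple,
so every caller unpacking its RPC payload gains a third element, the
userspace URI, which stays None unless the device was assembled as
primary. A minimal sketch of the new call-site shape (values hypothetical):

# The payload grew from (dev_path, link_name) to (dev_path, link_name, uri).
payload = ("/dev/drbd0", "/var/run/ganeti/instance-disks/web:0", None)
dev_path, link_name, uri = payload  # callers later in this diff unpack all three
print(dev_path, link_name, uri)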
......@@ -553,15 +553,21 @@ def InitCluster(cluster_name, mac_prefix, # pylint: disable=R0913, R0914
primary_ip_version=None, ipolicy=None,
prealloc_wipe_disks=False, use_external_mip_script=False,
hv_state=None, disk_state=None, enabled_disk_templates=None,
install_image=None, zeroing_image=None, compression_tools=None):
install_image=None, zeroing_image=None, compression_tools=None,
enabled_user_shutdown=False):
"""Initialise the cluster.
@type candidate_pool_size: int
@param candidate_pool_size: master candidate pool size
@type enabled_disk_templates: list of string
@param enabled_disk_templates: list of disk_templates to be used in this
cluster
@type enabled_user_shutdown: bool
@param enabled_user_shutdown: whether user shutdown is enabled cluster-wide
"""
# TODO: complete the docstring
if config.ConfigWriter.IsCluster():
......@@ -827,7 +833,8 @@ def InitCluster(cluster_name, mac_prefix, # pylint: disable=R0913, R0914
osparams_private_cluster={},
install_image=install_image,
zeroing_image=zeroing_image,
compression_tools=compression_tools
compression_tools=compression_tools,
enabled_user_shutdown=enabled_user_shutdown,
)
master_node_config = objects.Node(name=hostname.name,
primary_ip=hostname.ip,
......
......@@ -85,6 +85,7 @@ __all__ = [
"EARLY_RELEASE_OPT",
"ENABLED_HV_OPT",
"ENABLED_DISK_TEMPLATES_OPT",
"ENABLED_USER_SHUTDOWN_OPT",
"ERROR_CODES_OPT",
"FAILURE_ONLY_OPT",
"FIELDS_OPT",
......@@ -1313,6 +1314,12 @@ ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
"disk templates",
type="string", default=None)
ENABLED_USER_SHUTDOWN_OPT = cli_option("--user-shutdown",
default=None,
dest="enabled_user_shutdown",
help="Whether user shutdown is enabled",
type="bool")
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
type="keyval", default={},
help="NIC parameters")
......
......@@ -278,6 +278,12 @@ def InitCluster(opts, args):
compression_tools = _GetCompressionTools(opts)
default_ialloc_params = opts.default_iallocator_params
if opts.enabled_user_shutdown:
enabled_user_shutdown = True
else:
enabled_user_shutdown = False
bootstrap.InitCluster(cluster_name=args[0],
secondary_ip=opts.secondary_ip,
vg_name=vg_name,
......@@ -310,7 +316,8 @@ def InitCluster(opts, args):
enabled_disk_templates=enabled_disk_templates,
install_image=install_image,
zeroing_image=zeroing_image,
compression_tools=compression_tools
compression_tools=compression_tools,
enabled_user_shutdown=enabled_user_shutdown,
)
op = opcodes.OpClusterPostInit()
SubmitOpCode(op, opts=opts)
......@@ -566,6 +573,7 @@ def ShowClusterConfig(opts, args):
result["instance_communication_network"]),
("zeroing image", result["zeroing_image"]),
("compression tools", result["compression_tools"]),
("enabled user shutdown", result["enabled_user_shutdown"]),
]),
("Default node parameters",
......@@ -1158,7 +1166,9 @@ def SetClusterParams(opts, args):
opts.instance_communication_network is not None or
opts.zeroing_image is not None or
opts.shared_file_storage_dir is not None or
opts.compression_tools is not None):
opts.compression_tools is not None or
opts.enabled_user_shutdown is not None):
ToStderr("Please give at least one of the parameters.")
return 1
......@@ -1276,7 +1286,8 @@ def SetClusterParams(opts, args):
instance_communication_network=opts.instance_communication_network,
zeroing_image=opts.zeroing_image,
shared_file_storage_dir=opts.shared_file_storage_dir,
compression_tools=compression_tools
compression_tools=compression_tools,
enabled_user_shutdown=opts.enabled_user_shutdown,
)
return base.GetResult(None, opts, SubmitOrSend(op, opts))
......@@ -2142,7 +2153,9 @@ commands = {
GLOBAL_SHARED_FILEDIR_OPT, USE_EXTERNAL_MIP_SCRIPT, DISK_PARAMS_OPT,
HV_STATE_OPT, DISK_STATE_OPT, ENABLED_DISK_TEMPLATES_OPT,
IPOLICY_STD_SPECS_OPT, GLOBAL_GLUSTER_FILEDIR_OPT, INSTALL_IMAGE_OPT,
ZEROING_IMAGE_OPT, COMPRESSION_TOOLS_OPT]
ZEROING_IMAGE_OPT, COMPRESSION_TOOLS_OPT,
ENABLED_USER_SHUTDOWN_OPT,
]
+ INSTANCE_POLICY_OPTS + SPLIT_ISPECS_OPTS,
"[opts...] <cluster_name>", "Initialises a new cluster configuration"),
"destroy": (
......@@ -2225,7 +2238,8 @@ commands = {
DEFAULT_IALLOCATOR_PARAMS_OPT, RESERVED_LVS_OPT, DRY_RUN_OPT, PRIORITY_OPT,
PREALLOC_WIPE_DISKS_OPT, NODE_PARAMS_OPT, USE_EXTERNAL_MIP_SCRIPT,
DISK_PARAMS_OPT, HV_STATE_OPT, DISK_STATE_OPT] + SUBMIT_OPTS +
[ENABLED_DISK_TEMPLATES_OPT, IPOLICY_STD_SPECS_OPT, MODIFY_ETCHOSTS_OPT] +
[ENABLED_DISK_TEMPLATES_OPT, IPOLICY_STD_SPECS_OPT, MODIFY_ETCHOSTS_OPT,
ENABLED_USER_SHUTDOWN_OPT] +
INSTANCE_POLICY_OPTS +
[GLOBAL_FILEDIR_OPT, GLOBAL_SHARED_FILEDIR_OPT, ZEROING_IMAGE_OPT,
COMPRESSION_TOOLS_OPT],
......
......@@ -58,7 +58,8 @@ from ganeti.cmdlib.common import ShareAll, RunPostHook, \
CheckIpolicyVsDiskTemplates, CheckDiskAccessModeValidity, \
CheckDiskAccessModeConsistency, CreateNewClientCert, \
AddInstanceCommunicationNetworkOp, ConnectInstanceCommunicationNetworkOp, \
CheckImageValidity
CheckImageValidity, EnsureKvmdOnNodes
import ganeti.masterd.instance
......@@ -418,6 +419,7 @@ class LUClusterQuery(NoHooksLU):
"install_image": cluster.install_image,
"instance_communication_network": cluster.instance_communication_network,
"compression_tools": cluster.compression_tools,
"enabled_user_shutdown": cluster.enabled_user_shutdown,
}
return result
......@@ -1245,8 +1247,7 @@ class LUClusterSetParams(LogicalUnit):
# changes to the hypervisor list
if self.op.enabled_hypervisors is not None:
self.hv_list = self.op.enabled_hypervisors
for hv in self.hv_list:
for hv in self.op.enabled_hypervisors:
# if the hypervisor doesn't already exist in the cluster
# hvparams, we initialize it to empty, and then (in both
# cases) we make sure to fill the defaults, as we might not
......@@ -1256,8 +1257,6 @@ class LUClusterSetParams(LogicalUnit):
new_hvp[hv] = {}
new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
else:
self.hv_list = cluster.enabled_hypervisors
if self.op.hvparams or self.op.enabled_hypervisors is not None:
# either the enabled list has changed, or the parameters have, validate
......@@ -1544,6 +1543,8 @@ class LUClusterSetParams(LogicalUnit):
# re-read the fresh configuration again
self.cluster = self.cfg.GetClusterInfo()
ensure_kvmd = False
if self.op.hvparams:
self.cluster.hvparams = self.new_hvparams
if self.op.os_hvp:
......@@ -1551,6 +1552,7 @@ class LUClusterSetParams(LogicalUnit):
if self.op.enabled_hypervisors is not None:
self.cluster.hvparams = self.new_hvparams
self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
ensure_kvmd = True
if self.op.beparams:
self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
if self.op.nicparams:
......@@ -1611,6 +1613,11 @@ class LUClusterSetParams(LogicalUnit):
if self.op.use_external_mip_script is not None:
self.cluster.use_external_mip_script = self.op.use_external_mip_script
if self.op.enabled_user_shutdown is not None and \
self.cluster.enabled_user_shutdown != self.op.enabled_user_shutdown:
self.cluster.enabled_user_shutdown = self.op.enabled_user_shutdown
ensure_kvmd = True
def helper_os(aname, mods, desc):
desc += " OS list"
lst = getattr(self.cluster, aname)
......@@ -1685,6 +1692,13 @@ class LUClusterSetParams(LogicalUnit):
result.Warn("Could not re-enable the master ip on the master,"
" please restart manually", self.LogWarning)
# Even though 'self.op.enabled_user_shutdown' is being tested
# above, the RPCs can only be done after 'self.cfg.Update' because
# this will update the cluster object and sync 'Ssconf', and kvmd
# uses 'Ssconf'.
if ensure_kvmd:
EnsureKvmdOnNodes(self, feedback_fn)
if self.op.compression_tools is not None:
self.cfg.SetCompressionTools(self.op.compression_tools)
......
......@@ -1464,3 +1464,65 @@ def DetermineImageSize(lu, image, node_uuid):
# Finally, the conversion
return math.ceil(byte_size / 1024. / 1024.)
def EnsureKvmdOnNodes(lu, feedback_fn, nodes=None):
"""Ensure KVM daemon is running on nodes with KVM instances.
If user shutdown is enabled in the cluster:
- The KVM daemon will be started on VM-capable nodes containing
KVM instances.
- The KVM daemon will be stopped on non-VM-capable nodes.
If user shutdown is disabled in the cluster:
- The KVM daemon will be stopped on all nodes.
Issues a warning for each failed RPC call.
@type lu: L{LogicalUnit}
@param lu: logical unit on whose behalf we execute
@type feedback_fn: callable
@param feedback_fn: feedback function
@type nodes: list of string
@param nodes: if supplied, it overrides the node uuids to start/stop;
this is used mainly for optimization
"""
cluster = lu.cfg.GetClusterInfo()
# Either use the passed nodes or consider all cluster nodes
if nodes is not None:
node_uuids = set(nodes)
else:
node_uuids = lu.cfg.GetNodeList()
# Determine on which nodes the KVM daemon should be started/stopped
if constants.HT_KVM in cluster.enabled_hypervisors and \
cluster.enabled_user_shutdown:
start_nodes = []
stop_nodes = []
for node_uuid in node_uuids:
if lu.cfg.GetNodeInfo(node_uuid).vm_capable:
start_nodes.append(node_uuid)
else:
stop_nodes.append(node_uuid)
else:
start_nodes = []
stop_nodes = node_uuids
# Start KVM where necessary
if start_nodes:
results = lu.rpc.call_node_ensure_daemon(start_nodes, constants.KVMD, True)
for node_uuid in start_nodes:
results[node_uuid].Warn("Failed to start KVM daemon in node '%s'" %
node_uuid, feedback_fn)
# Stop KVM where necessary
if stop_nodes:
results = lu.rpc.call_node_ensure_daemon(stop_nodes, constants.KVMD, False)
for node_uuid in stop_nodes:
results[node_uuid].Warn("Failed to stop KVM daemon in node '%s'" %
node_uuid, feedback_fn)
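The node partitioning above reduces to a small pure function; a standalone
sketch with a plain dict in place of the Ganeti config (node names
hypothetical):

# kvmd runs only on VM-capable nodes, and only when KVM is enabled
# together with cluster-wide user shutdown; otherwise stop it everywhere.
def partition_kvmd_nodes(vm_capable_by_node, kvm_enabled, user_shutdown):
  if kvm_enabled and user_shutdown:
    start = [n for n, cap in vm_capable_by_node.items() if cap]
    stop = [n for n, cap in vm_capable_by_node.items() if not cap]
  else:
    start, stop = [], list(vm_capable_by_node)
  return start, stop

print(partition_kvmd_nodes({"node1": True, "node2": False}, True, True))
# -> (['node1'], ['node2'])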
......@@ -3773,16 +3773,16 @@ class LUInstanceSetParams(LogicalUnit):
if self.op.hotplug:
result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
(disk, self.instance),
self.instance.name, True, idx)
self.instance, True, idx)
if result.fail_msg:
changes.append(("disk/%d" % idx, "assemble:failed"))
self.LogWarning("Can't assemble newly created disk %d: %s",
idx, result.fail_msg)
else:
_, link_name = result.payload
_, link_name, uri = result.payload
msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
constants.HOTPLUG_TARGET_DISK,
disk, link_name, idx)
disk, (link_name, uri), idx)
changes.append(("disk/%d" % idx, msg))
return (disk, changes)
......
......@@ -209,11 +209,11 @@ class LUInstanceShutdown(LogicalUnit):
"""Check arguments.
"""
if self.op.no_remember and self.op.admin_state_source:
if self.op.no_remember and self.op.admin_state_source is not None:
self.LogWarning("Parameter 'admin_state_source' has no effect if used"
" with parameter 'no_remember'")
if not self.op.admin_state_source:
if self.op.admin_state_source is None:
self.op.admin_state_source = constants.ADMIN_SOURCE
def BuildHooksEnv(self):
......
......@@ -232,8 +232,9 @@ class LUInstanceQueryData(NoHooksLU):
remote_info = remote_info.payload
allow_userdown = \
instance.hypervisor != constants.HT_KVM or \
hvparams[constants.HV_KVM_USER_SHUTDOWN]
cluster.enabled_user_shutdown and \
(instance.hypervisor != constants.HT_KVM or
hvparams[constants.HV_KVM_USER_SHUTDOWN])
if remote_info and "state" in remote_info:
if hv_base.HvInstanceState.IsShutdown(remote_info["state"]):
......
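The reworked predicate makes the cluster-wide flag gate everything; a
standalone truth-table sketch with plain booleans (hypervisor names as in
Ganeti's constants):

# For KVM the per-instance HV_KVM_USER_SHUTDOWN hvparam must also be set;
# non-KVM hypervisors only need the cluster flag.
def allow_userdown(cluster_enabled, hypervisor, kvm_user_shutdown):
  return cluster_enabled and (hypervisor != "kvm" or kvm_user_shutdown)

assert not allow_userdown(False, "kvm", True)   # cluster flag wins
assert allow_userdown(True, "xen-pvm", False)   # non-KVM: flag suffices
assert not allow_userdown(True, "kvm", False)   # KVM also needs the hvparam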
......@@ -1400,7 +1400,7 @@ def AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
node_disk = node_disk.Copy()
node_disk.UnsetSize()
result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
instance.name, False, idx)
instance, False, idx)
msg = result.fail_msg
if msg:
secondary_nodes = lu.cfg.GetInstanceSecondaryNodes(instance.uuid)
......@@ -1426,7 +1426,7 @@ def AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
node_disk = node_disk.Copy()
node_disk.UnsetSize()
result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
instance.name, True, idx)
instance, True, idx)
msg = result.fail_msg
if msg:
lu.LogWarning("Could not prepare block device %s on node %s"
......@@ -1434,7 +1434,7 @@ def AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
inst_disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
disks_ok = False
else:
dev_path, _ = result.payload
dev_path, _, __ = result.payload
device_info.append((lu.cfg.GetNodeName(instance.primary_node),
inst_disk.iv_name, dev_path))
......
......@@ -43,7 +43,8 @@ from ganeti.cmdlib.common import CheckParamsNotGlobal, \
AdjustCandidatePool, CheckIAllocatorOrNode, LoadNodeEvacResult, \
GetWantedNodes, MapInstanceLvsToNodes, RunPostHook, \
FindFaultyInstanceDisks, CheckStorageTypeEnabled, CreateNewClientCert, \
AddNodeCertToCandidateCerts, RemoveNodeCertFromCandidateCerts
AddNodeCertToCandidateCerts, RemoveNodeCertFromCandidateCerts, \
EnsureKvmdOnNodes
def _DecideSelfPromotion(lu, exceptions=None):
......@@ -426,6 +427,8 @@ class LUNodeAdd(LogicalUnit):
else:
self.cfg.RemoveNodeFromCandidateCerts(self.new_node.uuid, warn_fn=None)
EnsureKvmdOnNodes(self, feedback_fn, nodes=[self.new_node.uuid])
class LUNodeSetParams(LogicalUnit):
"""Modifies the parameters of a node.
......@@ -804,6 +807,8 @@ class LUNodeSetParams(LogicalUnit):
if [self.old_role, self.new_role].count(self._ROLE_CANDIDATE) == 1:
self.context.ReaddNode(node)
EnsureKvmdOnNodes(self, feedback_fn, nodes=[node.uuid])
return result
......
......@@ -2994,6 +2994,8 @@ class ConfigWriter(object):
for ninfo in node_infos]
node_snd_ips = ["%s %s" % (ninfo.name, ninfo.secondary_ip)
for ninfo in node_infos]
node_vm_capable = ["%s=%s" % (ninfo.name, str(ninfo.vm_capable))
for ninfo in node_infos]
instance_data = fn(instance_names)
off_data = fn(node.name for node in node_infos if node.offline)
......@@ -3004,6 +3006,7 @@ class ConfigWriter(object):
node_data = fn(node_names)
node_pri_ips_data = fn(node_pri_ips)
node_snd_ips_data = fn(node_snd_ips)
node_vm_capable_data = fn(node_vm_capable)
cluster = self._ConfigData().cluster
cluster_tags = fn(cluster.GetTags())
......@@ -3040,6 +3043,7 @@ class ConfigWriter(object):
constants.SS_NODE_LIST: node_data,
constants.SS_NODE_PRIMARY_IPS: node_pri_ips_data,
constants.SS_NODE_SECONDARY_IPS: node_snd_ips_data,
constants.SS_NODE_VM_CAPABLE: node_vm_capable_data,
constants.SS_OFFLINE_NODES: off_data,
constants.SS_ONLINE_NODES: on_data,
constants.SS_PRIMARY_IP_FAMILY: str(cluster.primary_ip_family),
......@@ -3050,6 +3054,7 @@ class ConfigWriter(object):
constants.SS_UID_POOL: uid_pool,
constants.SS_NODEGROUPS: nodegroups_data,
constants.SS_NETWORKS: networks_data,
constants.SS_ENABLED_USER_SHUTDOWN: str(cluster.enabled_user_shutdown),
}
ssconf_values = self._ExtendByAllHvparamsStrings(ssconf_values,
all_hvparams)
......
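The two new ssconf entries follow the file's existing one-pair-per-line
convention; a sketch of the serialized forms (node names hypothetical):

# SS_NODE_VM_CAPABLE uses the same "%s=%s" formatting as the primary and
# secondary IP lists; SS_ENABLED_USER_SHUTDOWN is just the str()'d flag.
node_infos = [("node1.example.com", True), ("node2.example.com", False)]
print("\n".join("%s=%s" % (name, str(cap)) for name, cap in node_infos))
# node1.example.com=True
# node2.example.com=False
print(str(True))  # value written under SS_ENABLED_USER_SHUTDOWN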
......@@ -485,7 +485,7 @@ class HttpServer(http.HttpBase, asyncore.dispatcher):
@param ssl_verify_peer: Whether to require client certificate
and compare it with our certificate
@type request_executor_class: class
@param request_executor_class: an class derived from the
@param request_executor_class: a class derived from the
HttpServerRequestExecutor class
"""
......
......@@ -101,12 +101,33 @@ _RUNTIME_DEVICE = {
}
_RUNTIME_ENTRY = {
constants.HOTPLUG_TARGET_NIC: lambda d, e: d,
constants.HOTPLUG_TARGET_DISK: lambda d, e: (d, e, None)
constants.HOTPLUG_TARGET_DISK: lambda d, e: (d, e[0], e[1])
}
_MIGRATION_CAPS_DELIM = ":"
def _GetDriveURI(disk, link, uri):
"""Helper function to get the drive uri to be used in --drive kvm option
@type disk: L{objects.Disk}
@param disk: A disk configuration object
@type link: string
@param link: The device link as returned by _SymlinkBlockDev()
@type uri: string
@param uri: The drive uri as returned by _CalculateDeviceURI()
"""
access_mode = disk.params.get(constants.LDP_ACCESS,
constants.DISK_KERNELSPACE)
if (uri and access_mode == constants.DISK_USERSPACE):
drive_uri = uri
else:
drive_uri = link
return drive_uri
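The selection rule above is small enough to show standalone; a sketch with
string literals in place of Ganeti's constants (the rbd URI is a made-up
example):

# Userspace access wins only when a URI was actually computed; otherwise
# fall back to the kernelspace device symlink.
def pick_drive_uri(access_mode, link, uri):
  if uri and access_mode == "userspace":
    return uri
  return link

assert pick_drive_uri("userspace", "/dev/disk0", "rbd:rbd/vol") == "rbd:rbd/vol"
assert pick_drive_uri("kernelspace", "/dev/disk0", "rbd:rbd/vol") == "/dev/disk0"
assert pick_drive_uri("userspace", "/dev/disk0", None) == "/dev/disk0"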
def _GenerateDeviceKVMId(dev_type, dev):
"""Helper function to generate a unique device name used by KVM
......@@ -816,7 +837,7 @@ class KVMHypervisor(hv_base.BaseHypervisor):
@type instance_name: string
@param instance_name: the instance name
@type hvparams: dict of strings
@param hvparams: hvparams to be used with this instance
@param hvparams: hypervisor parameters to be used with this instance
@rtype: tuple of strings
@return: (name, id, memory, vcpus, stat, times)
......@@ -849,7 +870,7 @@ class KVMHypervisor(hv_base.BaseHypervisor):
"""Get properties of all instances.
@type hvparams: dict of strings
@param hvparams: hypervisor parameter
@param hvparams: hypervisor parameters
@return: list of tuples (name, id, memory, vcpus, stat, times)
"""
......@@ -937,12 +958,7 @@ class KVMHypervisor(hv_base.BaseHypervisor):
if needs_boot_flag and disk_type != constants.HT_DISK_IDE:
boot_val = ",boot=on"
access_mode = cfdev.params.get(constants.LDP_ACCESS,
constants.DISK_KERNELSPACE)