Commit 2cd855dd authored by Iustin Pop

Merge branch 'next'



* next: (34 commits)
  watcher: automatically restart noded/rapi
  watcher: handle full and drained queue cases
  rapi: rework error handling
  Fix backend.OSEnvironment be/hv parameters
  rapi: make tags query not use jobs
  Change failover instance when instance is stopped
  Export more instance information in hooks
  watcher: write the instance status to a file
  Fix the SafeEncoding behaviour
  Move more hypervisor strings into constants
  Add -H/-B startup parameters to gnt-instance
  call_instance_start: add optional hv/be parameters
  Fix gnt-job list argument handling
  Instance reinstall: don't mix up errors
  Don't check memory at startup if instance is up
  gnt-cluster modify: fix --no-lvm-storage
  LUSetClusterParams: improve volume group removal
  gnt-cluster info: show more cluster parameters
  LUQueryClusterInfo: return a few more fields
  Add the new DRBD test files to the Makefile
  ...
Signed-off-by: Iustin Pop <iustin@google.com>
Reviewed-by: Guido Trotter <ultrotter@google.com>
parents 7a8994d4 c4f0219c
......@@ -199,9 +199,11 @@ maninput = $(patsubst %.7,%.7.in,$(patsubst %.8,%.8.in,$(man_MANS))) $(patsubst
TEST_FILES = \
test/data/bdev-both.txt \
test/data/bdev-8.3-both.txt \
test/data/bdev-disk.txt \
test/data/bdev-net.txt \
test/data/proc_drbd8.txt
test/data/proc_drbd8.txt \
test/data/proc_drbd83.txt
dist_TESTS = \
test/ganeti.bdev_unittest.py \
......@@ -222,7 +224,7 @@ nodist_TESTS =
TESTS = $(dist_TESTS) $(nodist_TESTS)
TESTS_ENVIRONMENT = PYTHONPATH=.:$(top_builddir)
TESTS_ENVIRONMENT = PYTHONPATH=.:$(top_builddir) $(PYTHON)
RAPI_RESOURCES = $(wildcard lib/rapi/*.py)
......
......@@ -36,7 +36,6 @@ import collections
import Queue
import random
import signal
import simplejson
import logging
from cStringIO import StringIO
......@@ -55,6 +54,7 @@ from ganeti import ssconf
from ganeti import workerpool
from ganeti import rpc
from ganeti import bootstrap
from ganeti import serializer
CLIENT_REQUEST_WORKERS = 16
......@@ -152,7 +152,7 @@ class ClientRqHandler(SocketServer.BaseRequestHandler):
logging.debug("client closed connection")
break
request = simplejson.loads(msg)
request = serializer.LoadJson(msg)
logging.debug("request: %s", request)
if not isinstance(request, dict):
logging.error("wrong request received: %s", msg)
......@@ -181,7 +181,7 @@ class ClientRqHandler(SocketServer.BaseRequestHandler):
luxi.KEY_RESULT: result,
}
logging.debug("response: %s", response)
self.send_message(simplejson.dumps(response))
self.send_message(serializer.DumpJson(response))
def read_message(self):
while not self._msgs:
......
......@@ -721,7 +721,7 @@ def ParseOptions():
"""
parser = OptionParser(description="Ganeti node daemon",
usage="%prog [-f] [-d]",
usage="%prog [-f] [-d] [-b ADDRESS]",
version="%%prog (ganeti) %s" %
constants.RELEASE_VERSION)
......@@ -731,6 +731,10 @@ def ParseOptions():
parser.add_option("-d", "--debug", dest="debug",
help="Enable some debug messages",
default=False, action="store_true")
parser.add_option("-b", "--bind", dest="bind_address",
help="Bind address",
default="", metavar="ADDRESS")
options, args = parser.parse_args()
return options, args
......@@ -781,7 +785,7 @@ def main():
queue_lock = jstore.InitAndVerifyQueue(must_lock=False)
mainloop = daemon.Mainloop()
server = NodeHttpServer(mainloop, "", port,
server = NodeHttpServer(mainloop, options.bind_address, port,
ssl_params=ssl_params, ssl_verify_peer=True)
server.Start()
try:
......
......@@ -80,6 +80,20 @@ def StartMaster():
return not result.failed
def EnsureDaemon(daemon):
"""Check for and start daemon if not alive.
"""
pidfile = utils.DaemonPidFileName(daemon)
pid = utils.ReadPidFile(pidfile)
if pid == 0 or not utils.IsProcessAlive(pid): # no file or dead pid
logging.debug("Daemon '%s' not alive, trying to restart", daemon)
result = utils.RunCmd([daemon])
if result.failed:
logging.error("Can't start daemon '%s', failure %s, output: %s",
daemon, result.fail_reason, result.output)
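
EnsureDaemon leans on utils.DaemonPidFileName, utils.ReadPidFile and utils.IsProcessAlive for the liveness test. A rough, self-contained sketch of that check, assuming nothing beyond the standard library (the real Ganeti helpers are more careful, e.g. they also consult /proc; the _pid_alive name is hypothetical):

    import errno
    import os

    def _pid_alive(pidfile):
        # hypothetical stand-in for utils.ReadPidFile + utils.IsProcessAlive:
        # read the pid file, then probe the process with signal 0
        try:
            pid = int(open(pidfile).read().strip())
        except (EnvironmentError, ValueError):
            return False
        if pid <= 0:
            return False
        try:
            os.kill(pid, 0)           # signal 0: existence check only
            return True
        except OSError, err:
            # EPERM means the process exists but belongs to another user
            return err.errno == errno.EPERM
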
class WatcherState(object):
"""Interface to a state file recording restart attempts.
......@@ -255,10 +269,17 @@ def GetClusterData():
all_results = cli.PollJob(job_id, cl=client, feedback_fn=logging.debug)
logging.debug("Got data from cluster, writing instance status file")
result = all_results[0]
smap = {}
instances = {}
# write the upfile
up_data = "".join(["%s %s\n" % (fields[0], fields[1]) for fields in result])
utils.WriteFile(file_name=constants.INSTANCE_UPFILE, data=up_data)
for fields in result:
(name, status, autostart, snodes) = fields
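
The status file written to constants.INSTANCE_UPFILE above is plain text, one "<instance name> <run status>" pair per line. A hypothetical reader, shown only to illustrate the format and not part of this change:

    def read_instance_status(path):
        # parse the "<name> <status>" lines written by GetClusterData
        status = {}
        for line in open(path):
            line = line.strip()
            if not line:
                continue
            name, state = line.split(None, 1)
            status[name] = state
        return status
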
......@@ -291,6 +312,9 @@ class Watcher(object):
master = client.QueryConfigValues(["master_node"])[0]
if master != utils.HostInfo().name:
raise NotMasterError("This is not the master node")
# first archive old jobs
self.ArchiveJobs(opts.job_age)
# and only then submit new ones
self.instances, self.bootids, self.smap = GetClusterData()
self.started_instances = set()
self.opts = opts
......@@ -300,12 +324,12 @@ class Watcher(object):
"""
notepad = self.notepad
self.ArchiveJobs(self.opts.job_age)
self.CheckInstances(notepad)
self.CheckDisks(notepad)
self.VerifyDisks()
def ArchiveJobs(self, age):
@staticmethod
def ArchiveJobs(age):
"""Archive old jobs.
"""
......@@ -452,8 +476,12 @@ def main():
utils.SetupLogging(constants.LOG_WATCHER, debug=options.debug,
stderr_logging=options.debug)
update_file = True
update_file = False
try:
# on master or not, try to start the node daemon (the _PID constant
# is the same as the daemon name)
EnsureDaemon(constants.NODED_PID)
notepad = WatcherState()
try:
try:
......@@ -461,24 +489,30 @@ def main():
except errors.OpPrereqError:
# this is, from cli.GetClient, a not-master case
logging.debug("Not on master, exiting")
update_file = True
sys.exit(constants.EXIT_SUCCESS)
except luxi.NoMasterError, err:
logging.warning("Master seems to be down (%s), trying to restart",
str(err))
if not StartMaster():
logging.critical("Can't start the master, exiting")
update_file = False
sys.exit(constants.EXIT_FAILURE)
# else retry the connection
client = cli.GetClient()
# we are on master now (the _PID constant is the same as the daemon name)
EnsureDaemon(constants.RAPI_PID)
try:
watcher = Watcher(options, notepad)
except errors.ConfigurationError:
# Just exit if there's no configuration
update_file = True
sys.exit(constants.EXIT_SUCCESS)
watcher.Run()
update_file = True
finally:
if update_file:
notepad.Save()
......@@ -492,6 +526,10 @@ def main():
except errors.ResolverError, err:
logging.error("Cannot resolve hostname '%s', exiting.", err.args[0])
sys.exit(constants.EXIT_NODESETUP_ERROR)
except errors.JobQueueFull:
logging.error("Job queue is full, can't query cluster state")
except errors.JobQueueDrainError:
logging.error("Job queue is drained, can't maintain cluster state")
except Exception, err:
logging.error(str(err), exc_info=True)
sys.exit(constants.EXIT_FAILURE)
......
......@@ -16,17 +16,22 @@ DESC="Ganeti cluster"
GANETIRUNDIR="@LOCALSTATEDIR@/run/ganeti"
GANETI_DEFAULTS_FILE="@SYSCONFDIR@/default/ganeti"
NODED_NAME="ganeti-noded"
NODED="@PREFIX@/sbin/${NODED_NAME}"
NODED_PID="${GANETIRUNDIR}/${NODED_NAME}.pid"
NODED_ARGS=""
MASTERD_NAME="ganeti-masterd"
MASTERD="@PREFIX@/sbin/${MASTERD_NAME}"
MASTERD_PID="${GANETIRUNDIR}/${MASTERD_NAME}.pid"
MASTERD_ARGS=""
RAPI_NAME="ganeti-rapi"
RAPI="@PREFIX@/sbin/${RAPI_NAME}"
RAPI_PID="${GANETIRUNDIR}/${RAPI_NAME}.pid"
RAPI_ARGS=""
SCRIPTNAME="@SYSCONFDIR@/init.d/ganeti"
......@@ -34,6 +39,10 @@ test -f $NODED || exit 0
. /lib/lsb/init-functions
if [ -s $GANETI_DEFAULTS_FILE ]; then
. $GANETI_DEFAULTS_FILE
fi
check_config() {
for fname in \
"@LOCALSTATEDIR@/lib/ganeti/server.pem"
......@@ -84,16 +93,16 @@ case "$1" in
start)
log_daemon_msg "Starting $DESC" "$NAME"
check_config
start_action $NODED $NODED_PID
start_action $MASTERD $MASTERD_PID
start_action $RAPI $RAPI_PID
;;
start_action $NODED $NODED_PID $NODED_ARGS
start_action $MASTERD $MASTERD_PID $MASTERD_ARGS
start_action $RAPI $RAPI_PID $RAPI_ARGS
;;
stop)
log_daemon_msg "Stopping $DESC" "$NAME"
stop_action $RAPI $RAPI_PID
stop_action $MASTERD $MASTERD_PID
stop_action $NODED $NODED_PID
;;
;;
restart|force-reload)
log_daemon_msg "Reloading $DESC"
stop_action $RAPI $RAPI_PID
......@@ -103,11 +112,11 @@ case "$1" in
start_action $NODED $NODED_PID
start_action $MASTERD $MASTERD_PID
start_action $RAPI $RAPI_PID
;;
;;
*)
log_success_msg "Usage: $SCRIPTNAME {start|stop|force-reload|restart}"
exit 1
;;
;;
esac
exit 0
......@@ -1690,6 +1690,10 @@ def OSEnvironment(instance, debug=0):
result['NIC_%d_FRONTEND_TYPE' % idx] = \
instance.hvparams[constants.HV_NIC_TYPE]
for source, kind in [(instance.beparams, "BE"), (instance.hvparams, "HV")]:
for key, value in source.items():
result["INSTANCE_%s_%s" % (kind, key)] = str(value)
return result
def BlockdevGrow(disk, amount):
......
......@@ -563,7 +563,7 @@ class DRBD8Status(object):
"""
UNCONF_RE = re.compile(r"\s*[0-9]+:\s*cs:Unconfigured$")
LINE_RE = re.compile(r"\s*[0-9]+:\s*cs:(\S+)\s+st:([^/]+)/(\S+)"
LINE_RE = re.compile(r"\s*[0-9]+:\s*cs:(\S+)\s+(?:st|ro):([^/]+)/(\S+)"
"\s+ds:([^/]+)/(\S+)\s+.*$")
SYNC_RE = re.compile(r"^.*\ssync'ed:\s*([0-9.]+)%.*"
"\sfinish: ([0-9]+):([0-9]+):([0-9]+)\s.*$")
......@@ -896,15 +896,20 @@ class DRBD8(BaseDRBD):
# value types
value = pyp.Word(pyp.alphanums + '_-/.:')
quoted = dbl_quote + pyp.CharsNotIn('"') + dbl_quote
addr_port = (pyp.Word(pyp.nums + '.') + pyp.Literal(':').suppress() +
number)
addr_type = (pyp.Optional(pyp.Literal("ipv4")).suppress() +
pyp.Optional(pyp.Literal("ipv6")).suppress())
addr_port = (addr_type + pyp.Word(pyp.nums + '.') +
pyp.Literal(':').suppress() + number)
# meta device, extended syntax
meta_value = ((value ^ quoted) + pyp.Literal('[').suppress() +
number + pyp.Word(']').suppress())
# device name, extended syntax
device_value = pyp.Literal("minor").suppress() + number
# a statement
stmt = (~rbrace + keyword + ~lbrace +
pyp.Optional(addr_port ^ value ^ quoted ^ meta_value) +
pyp.Optional(addr_port ^ value ^ quoted ^ meta_value ^
device_value) +
pyp.Optional(defa) + semi +
pyp.Optional(pyp.restOfLine).suppress())
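
DRBD 8.3's "drbdsetup show" output prefixes addresses with the address family (ipv4/ipv6) and may describe the device as "minor N", which is what the two new grammar fragments accept. A stand-alone sketch of just the address rule, reusing the names above and omitting the rest of the grammar:

    import pyparsing as pyp

    number = pyp.Word(pyp.nums)
    addr_type = (pyp.Optional(pyp.Literal("ipv4")).suppress() +
                 pyp.Optional(pyp.Literal("ipv6")).suppress())
    addr_port = (addr_type + pyp.Word(pyp.nums + '.') +
                 pyp.Literal(':').suppress() + number)

    print addr_port.parseString("192.0.2.1:7789")       # ['192.0.2.1', '7789']
    print addr_port.parseString("ipv4 192.0.2.1:7789")  # same result, family dropped
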
......
......@@ -25,7 +25,6 @@
import os
import os.path
import sha
import re
import logging
import tempfile
......
......@@ -815,6 +815,8 @@ def GenerateTable(headers, fields, separator, data,
format = separator.replace("%", "%%").join(format_fields)
for row in data:
if row is None:
continue
for idx, val in enumerate(row):
if unitfields.Matches(fields[idx]):
try:
......@@ -840,6 +842,8 @@ def GenerateTable(headers, fields, separator, data,
for line in data:
args = []
if line is None:
line = ['-' for _ in fields]
for idx in xrange(len(fields)):
if separator is None:
args.append(mlens[idx])
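
Taken together, the two hunks above let GenerateTable cope with a None entry in data: it is skipped while computing column widths and rendered as a row of dashes. In essence:

    fields = ["id", "status", "summary"]
    line = None                       # a row that could not be retrieved
    if line is None:
        line = ['-' for _ in fields]
    # line == ['-', '-', '-'], formatted like any other row
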
......
......@@ -25,7 +25,6 @@
import os
import os.path
import sha
import time
import tempfile
import re
......@@ -454,7 +453,8 @@ def _CheckNodeNotDrained(lu, node):
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
memory, vcpus, nics, disk_template, disks):
memory, vcpus, nics, disk_template, disks,
bep, hvp, hypervisor):
"""Builds instance related env variables for hooks
This builds the hook environment from individual variables.
......@@ -480,6 +480,12 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
@param disk_template: the disk template of the instance
@type disks: list
@param disks: the list of (size, mode) pairs
@type bep: dict
@param bep: the backend parameters for the instance
@type hvp: dict
@param hvp: the hypervisor parameters for the instance
@type hypervisor: string
@param hypervisor: the hypervisor for the instance
@rtype: dict
@return: the hook environment for this instance
......@@ -498,6 +504,7 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
"INSTANCE_MEMORY": memory,
"INSTANCE_VCPUS": vcpus,
"INSTANCE_DISK_TEMPLATE": disk_template,
"INSTANCE_HYPERVISOR": hypervisor,
}
if nics:
......@@ -523,6 +530,10 @@ def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
env["INSTANCE_DISK_COUNT"] = disk_count
for source, kind in [(bep, "BE"), (hvp, "HV")]:
for key, value in source.items():
env["INSTANCE_%s_%s" % (kind, key)] = value
return env
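
The new loop exports every backend and hypervisor parameter of the instance to hooks as INSTANCE_BE_* and INSTANCE_HV_* variables. A small illustration (the parameter names are typical examples, not an exhaustive list):

    bep = {"memory": 512, "vcpus": 1, "auto_balance": True}
    hvp = {"kernel_path": "/boot/vmlinuz-2.6-xenU"}
    env = {}
    for source, kind in [(bep, "BE"), (hvp, "HV")]:
        for key, value in source.items():
            env["INSTANCE_%s_%s" % (kind, key)] = value
    # env now contains INSTANCE_BE_memory, INSTANCE_BE_vcpus,
    # INSTANCE_BE_auto_balance and INSTANCE_HV_kernel_path
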
......@@ -541,7 +552,9 @@ def _BuildInstanceHookEnvByObject(lu, instance, override=None):
@return: the hook environment dictionary
"""
bep = lu.cfg.GetClusterInfo().FillBE(instance)
cluster = lu.cfg.GetClusterInfo()
bep = cluster.FillBE(instance)
hvp = cluster.FillHV(instance)
args = {
'name': instance.name,
'primary_node': instance.primary_node,
......@@ -553,6 +566,9 @@ def _BuildInstanceHookEnvByObject(lu, instance, override=None):
'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
'disk_template': instance.disk_template,
'disks': [(disk.size, disk.mode) for disk in instance.disks],
'bep': bep,
'hvp': hvp,
'hypervisor': instance.hypervisor,
}
if override:
args.update(override)
......@@ -1524,8 +1540,11 @@ class LUSetClusterParams(LogicalUnit):
"""
if self.op.vg_name is not None:
if self.op.vg_name != self.cfg.GetVGName():
self.cfg.SetVGName(self.op.vg_name)
new_volume = self.op.vg_name
if not new_volume:
new_volume = None
if new_volume != self.cfg.GetVGName():
self.cfg.SetVGName(new_volume)
else:
feedback_fn("Cluster LVM configuration already in desired"
" state, not changing")
......@@ -2441,6 +2460,10 @@ class LUQueryClusterInfo(NoHooksLU):
for hypervisor in cluster.enabled_hypervisors]),
"beparams": cluster.beparams,
"candidate_pool_size": cluster.candidate_pool_size,
"default_bridge": cluster.default_bridge,
"master_netdev": cluster.master_netdev,
"volume_group_name": cluster.volume_group_name,
"file_storage_dir": cluster.file_storage_dir,
}
return result
......@@ -2755,15 +2778,48 @@ class LUStartupInstance(LogicalUnit):
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
# extra beparams
self.beparams = getattr(self.op, "beparams", {})
if self.beparams:
if not isinstance(self.beparams, dict):
raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
" dict" % (type(self.beparams), ))
# fill the beparams dict
utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
self.op.beparams = self.beparams
# extra hvparams
self.hvparams = getattr(self.op, "hvparams", {})
if self.hvparams:
if not isinstance(self.hvparams, dict):
raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
" dict" % (type(self.hvparams), ))
# check hypervisor parameter syntax (locally)
cluster = self.cfg.GetClusterInfo()
utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
filled_hvp = cluster.FillDict(cluster.hvparams[instance.hypervisor],
instance.hvparams)
filled_hvp.update(self.hvparams)
hv_type = hypervisor.GetHypervisor(instance.hypervisor)
hv_type.CheckParameterSyntax(filled_hvp)
_CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
self.op.hvparams = self.hvparams
_CheckNodeOnline(self, instance.primary_node)
bep = self.cfg.GetClusterInfo().FillBE(instance)
# check bridges existence
_CheckInstanceBridgesExist(self, instance)
_CheckNodeFreeMemory(self, instance.primary_node,
"starting instance %s" % instance.name,
bep[constants.BE_MEMORY], instance.hypervisor)
remote_info = self.rpc.call_instance_info(instance.primary_node,
instance.name,
instance.hypervisor)
remote_info.Raise()
if not remote_info.data:
_CheckNodeFreeMemory(self, instance.primary_node,
"starting instance %s" % instance.name,
bep[constants.BE_MEMORY], instance.hypervisor)
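
The hvparams handling above amounts to a three-level merge: cluster defaults, the instance's own hvparams, and finally the one-off values passed to "gnt-instance start" with -H, with the result validated before use. Conceptually (names and values are illustrative):

    cluster_hv  = {"kernel_path": "/boot/vmlinuz-2.6-xenU", "root_path": "/dev/sda1"}
    instance_hv = {"root_path": "/dev/xvda1"}
    oneoff_hv   = {"kernel_path": "/boot/vmlinuz-test"}   # from "-H kernel_path=..."

    filled_hvp = dict(cluster_hv)       # cluster-level defaults
    filled_hvp.update(instance_hv)      # cluster.FillDict(...) covers these two steps
    filled_hvp.update(oneoff_hv)        # the -H overrides are applied on top
    # {'kernel_path': '/boot/vmlinuz-test', 'root_path': '/dev/xvda1'}
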
def Exec(self, feedback_fn):
"""Start the instance.
......@@ -2778,7 +2834,8 @@ class LUStartupInstance(LogicalUnit):
_StartInstanceDisks(self, instance, force)
result = self.rpc.call_instance_start(node_current, instance)
result = self.rpc.call_instance_start(node_current, instance,
self.hvparams, self.beparams)
msg = result.RemoteFailMsg()
if msg:
_ShutdownInstanceDisks(self, instance)
......@@ -2860,7 +2917,7 @@ class LURebootInstance(LogicalUnit):
" full reboot: %s" % msg)
_ShutdownInstanceDisks(self, instance)
_StartInstanceDisks(self, instance, ignore_secondaries)
result = self.rpc.call_instance_start(node_current, instance)
result = self.rpc.call_instance_start(node_current, instance, None, None)
msg = result.RemoteFailMsg()
if msg:
_ShutdownInstanceDisks(self, instance)
......@@ -2960,7 +3017,8 @@ class LUReinstallInstance(LogicalUnit):
remote_info = self.rpc.call_instance_info(instance.primary_node,
instance.name,
instance.hypervisor)
if remote_info.failed or remote_info.data:
remote_info.Raise()
if remote_info.data:
raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
(self.op.instance_name,
instance.primary_node))
......@@ -3478,10 +3536,15 @@ class LUFailoverInstance(LogicalUnit):
target_node = secondary_nodes[0]
_CheckNodeOnline(self, target_node)
_CheckNodeNotDrained(self, target_node)
# check memory requirements on the secondary node
_CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
instance.name, bep[constants.BE_MEMORY],
instance.hypervisor)
if instance.admin_up:
# check memory requirements on the secondary node
_CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
instance.name, bep[constants.BE_MEMORY],
instance.hypervisor)
else:
self.LogInfo("Not checking memory on the secondary node as"
" instance will not be started")
# check bridge existence
brlist = [nic.bridge for nic in instance.nics]
......@@ -3550,7 +3613,7 @@ class LUFailoverInstance(LogicalUnit):
raise errors.OpExecError("Can't activate the instance's disks")
feedback_fn("* starting the instance on the target node")
result = self.rpc.call_instance_start(target_node, instance)
result = self.rpc.call_instance_start(target_node, instance, None, None)
msg = result.RemoteFailMsg()
if msg:
_ShutdownInstanceDisks(self, instance)
......@@ -4300,6 +4363,7 @@ class LUCreateInstance(LogicalUnit):
self.op.hvparams)
hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
hv_type.CheckParameterSyntax(filled_hvp)
self.hv_full = filled_hvp
# fill and remember the beparams dict
utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
......@@ -4477,6 +4541,9 @@ class LUCreateInstance(LogicalUnit):
nics=[(n.ip, n.bridge, n.mac) for n in self.nics],
disk_template=self.op.disk_template,
disks=[(d["size"], d["mode"]) for d in self.disks],
bep=self.be_full,
hvp=self.hv_full,
hypervisor=self.op.hypervisor,
))
nl = ([self.cfg.GetMasterNode(), self.op.pnode] +
......@@ -4794,7 +4861,7 @@ class LUCreateInstance(LogicalUnit):
self.cfg.Update(iobj)
logging.info("Starting instance %s on node %s", instance, pnode_name)
feedback_fn("* starting instance...")
result = self.rpc.call_instance_start(pnode_name, iobj)
result = self.rpc.call_instance_start(pnode_name, iobj, None, None)
msg = result.RemoteFailMsg()
if msg:
raise errors.OpExecError("Could not start instance: %s" % msg)
......@@ -6242,7 +6309,7 @@ class LUExportInstance(LogicalUnit):
finally:
if self.op.shutdown and instance.admin_up:
result = self.rpc.call_instance_start(src_node, instance)
result = self.rpc.call_instance_start(src_node, instance, None, None)
msg = result.RemoteFailMsg()
if msg:
_ShutdownInstanceDisks(self, instance)
......
......@@ -96,6 +96,7 @@ CLUSTER_CONF_FILE = DATA_DIR + "/config.data"
SSL_CERT_FILE = DATA_DIR + "/server.pem"
RAPI_CERT_FILE = DATA_DIR + "/rapi.pem"
WATCHER_STATEFILE = DATA_DIR + "/watcher.data"
INSTANCE_UPFILE = RUN_GANETI_DIR + "/instance-status"
SSH_KNOWN_HOSTS_FILE = DATA_DIR + "/known_hosts"
RAPI_USERS_FILE = DATA_DIR + "/rapi_users"
QUEUE_DIR = DATA_DIR + "/queue"
......@@ -356,7 +357,7 @@ VNC_BASE_PORT = 5900
VNC_PASSWORD_FILE = _autoconf.SYSCONFDIR + "/ganeti/vnc-cluster-password"
VNC_DEFAULT_BIND_ADDRESS = '0.0.0.0'
# Device types
# NIC types
HT_NIC_RTL8139 = "rtl8139"
HT_NIC_NE2K_PCI = "ne2k_pci"
HT_NIC_NE2K_ISA = "ne2k_isa"
......@@ -366,25 +367,40 @@ HT_NIC_I8259ER = "i82559er"
HT_NIC_PCNET = "pcnet"
HT_NIC_E1000 = "e1000"
HT_NIC_PARAVIRTUAL = HT_DISK_PARAVIRTUAL = "paravirtual"
HT_DISK_IOEMU = "ioemu"
HT_DISK_IDE = "ide"
HT_DISK_SCSI = "scsi"
HT_DISK_SD = "sd"
HT_DISK_MTD = "mtd"
HT_DISK_PFLASH = "pflash"
HT_HVM_VALID_NIC_TYPES = frozenset([HT_NIC_RTL8139, HT_NIC_NE2K_PCI,
HT_NIC_NE2K_ISA, HT_NIC_PARAVIRTUAL])
HT_HVM_VALID_DISK_TYPES = frozenset([HT_DISK_PARAVIRTUAL, HT_DISK_IOEMU])
HT_KVM_VALID_NIC_TYPES = frozenset([HT_NIC_RTL8139, HT_NIC_NE2K_PCI,
HT_NIC_NE2K_ISA, HT_NIC_I82551,
HT_NIC_I85557B, HT_NIC_I8259ER,
HT_NIC_PCNET, HT_NIC_E1000,
HT_NIC_PARAVIRTUAL])
# Disk types
HT_DISK_IOEMU = "ioemu"
HT_DISK_IDE = "ide"
HT_DISK_SCSI = "scsi"
HT_DISK_SD = "sd"
HT_DISK_MTD = "mtd"
HT_DISK_PFLASH = "pflash"