Commit bb133242 authored by Klaus Aehlig

Merge branch 'stable-2.10' into master

* stable-2.10
  Remove FIXME + update man pages wrt iallocator
  Don't check for master netdev when using OVS
  Fix TypeError in backend/ConfigureOVS
  Create Open vSwitch on Master during Cluster init
  SimpleRetry on BlockDev.Remove()
  Show the key in "'None' is not a valid Maybe value"
  Fix usage of xm/xl for gnt-instance modify --new-primary
  Hs2Py constants (17 patches)
  Fix default value for reason trail
  Hotplug (23 patches)

* stable-2.8
  Add all dependencies for confd as test dependencies
  Add snap-server to the test-relevant packages
  Placate warnings on ganeti.outils_unittest.py

Conflicts:
	lib/cmdlib/instance.py: trivial
	src/Ganeti/Types.hs: use both additions
	test/py/cmdlib/instance_unittest.py: manually
	    apply ff5def9b to the version in master
Signed-off-by: Klaus Aehlig <aehlig@google.com>
Reviewed-by: Michele Tartara <mtartara@google.com>
parents a8b1e9f8 cabc869c
@@ -1327,6 +1327,7 @@ TEST_FILES = \
test/data/kvm_0.9.1_help_boot_test.txt \
test/data/kvm_1.0_help.txt \
test/data/kvm_1.1.2_help.txt \
test/data/kvm_runtime.json \
test/data/lvs_lv.txt \
test/data/NEWS_OK.txt \
test/data/NEWS_previous_unreleased.txt \
......
@@ -51,6 +51,14 @@ New features
- Hail and hbal now have the (optional) capability of accessing average CPU
  load information through the monitoring daemon, and of using it to
  dynamically adapt the allocation of instances.
- Hotplug support. The new ``--hotplug`` option for ``gnt-instance modify``
  makes disk and NIC modifications take effect without requiring an actual
  reboot. There are currently a few constraints on this feature:

  - only the KVM hypervisor (version >= 1.0) supports it,
  - one cannot (yet) hotplug a disk that uses userspace access mode for RBD,
  - after a downgrade, instances need a reboot in order to be migratable
    (due to a change in the format of the runtime files).
Misc changes
~~~~~~~~~~~~
......
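Note on the NEWS constraints above: the KVM-only restriction is enforced at
execution time through VerifyHotplugSupport() on the hypervisor class (see the
backend changes below). A self-contained sketch of such a version gate, with a
hypothetical helper name (the real check inspects the instance's hypervisor
object), in Python:

    def hotplug_supported(hv_name, hv_version):
        """Gate mirroring the documented constraint: only KVM >= 1.0 hotplugs.

        Illustrative only; Ganeti's real check is VerifyHotplugSupport()
        on the KVM hypervisor class.
        """
        return hv_name == "kvm" and hv_version >= (1, 0)

    assert hotplug_supported("kvm", (1, 4))
    assert not hotplug_supported("kvm", (0, 15))      # pre-1.0 KVM refused
    assert not hotplug_supported("xen-pvm", (4, 2))   # non-KVM refused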
@@ -614,8 +614,12 @@ AC_GHC_PKG_CHECK([temporary], [], [HS_NODEV=1])
# of the checks.
AC_GHC_PKG_CHECK([attoparsec], [], [HS_NODEV=1])
AC_GHC_PKG_CHECK([vector], [], [HS_NODEV=1])
-AC_GHC_PKG_CHECK([process], [],
-                 [MONITORING_PKG="$MONITORING_PKG process"])
+AC_GHC_PKG_CHECK([process], [], [HS_NODEV=1])
+AC_GHC_PKG_CHECK([snap-server], [], [HS_NODEV=1])
+AC_GHC_PKG_CHECK([regex-pcre], [], [HS_NODEV=1])
+AC_GHC_PKG_CHECK([Crypto], [], [HS_NODEV=1])
+AC_GHC_PKG_CHECK([text], [], [HS_NODEV=1])
+AC_GHC_PKG_CHECK([hinotify], [], [HS_NODEV=1])
if test -n "$HS_NODEV"; then
AC_MSG_WARN(m4_normalize([Required development modules were not found,
you won't be able to run Haskell unittests]))
......
@@ -1661,6 +1661,28 @@ def _RemoveBlockDevLinks(instance_name, disks):
logging.exception("Can't remove symlink '%s'", link_name)
def _CalculateDeviceURI(instance, disk, device):
"""Get the URI for the device.
@type instance: L{objects.Instance}
@param instance: the instance which disk belongs to
@type disk: L{objects.Disk}
@param disk: the target disk object
@type device: L{bdev.BlockDev}
@param device: the corresponding BlockDevice
@rtype: string
@return: the device uri if any else None
"""
access_mode = disk.params.get(constants.LDP_ACCESS,
constants.DISK_KERNELSPACE)
if access_mode == constants.DISK_USERSPACE:
# This can raise errors.BlockDeviceError
return device.GetUserspaceAccessUri(instance.hypervisor)
else:
return None
def _GatherAndLinkBlockDevs(instance):
"""Set up an instance's block device(s).
@@ -1670,7 +1692,7 @@ def _GatherAndLinkBlockDevs(instance):
@type instance: L{objects.Instance}
@param instance: the instance whose disks we should assemble
@rtype: list
-  @return: list of (disk_object, device_path)
+  @return: list of (disk_object, link_name, drive_uri)
"""
block_devices = []
@@ -1685,8 +1707,9 @@ def _GatherAndLinkBlockDevs(instance):
except OSError, e:
raise errors.BlockDeviceError("Cannot create block device symlink: %s" %
e.strerror)
-    block_devices.append((disk, link_name, device))
+    uri = _CalculateDeviceURI(instance, disk, device)
+    block_devices.append((disk, link_name, uri))
return block_devices
@@ -1993,6 +2016,43 @@ def GetMigrationStatus(instance):
_Fail("Failed to get migration status: %s", err, exc=True)
def HotplugDevice(instance, action, dev_type, device, extra, seq):
"""Hotplug a device
Hotplug is currently supported only for KVM Hypervisor.
@type instance: L{objects.Instance}
@param instance: the instance to which we hotplug a device
@type action: string
@param action: the hotplug action to perform
@type dev_type: string
@param dev_type: the device type to hotplug
@type device: either L{objects.NIC} or L{objects.Disk}
@param device: the device object to hotplug
@type extra: string
@param extra: extra info used by hotplug code (e.g. disk link)
@type seq: int
@param seq: the index of the device from master perspective
@raise RPCFail: in case instance does not have KVM hypervisor
"""
hyper = hypervisor.GetHypervisor(instance.hypervisor)
try:
hyper.VerifyHotplugSupport(instance, action, dev_type)
except errors.HotplugError, err:
_Fail("Hotplug is not supported: %s", err)
if action == constants.HOTPLUG_ACTION_ADD:
fn = hyper.HotAddDevice
elif action == constants.HOTPLUG_ACTION_REMOVE:
fn = hyper.HotDelDevice
elif action == constants.HOTPLUG_ACTION_MODIFY:
fn = hyper.HotModDevice
else:
assert action in constants.HOTPLUG_ALL_ACTIONS
return fn(instance, dev_type, device, extra, seq)
def BlockdevCreate(disk, size, owner, on_primary, info, excl_stor):
"""Creates a block device for an instance.
@@ -2168,10 +2228,18 @@ def BlockdevRemove(disk):
rdev = None
if rdev is not None:
r_path = rdev.dev_path
-    try:
-      rdev.Remove()
-    except errors.BlockDeviceError, err:
-      msgs.append(str(err))
+    def _TryRemove():
+      try:
+        rdev.Remove()
+        return []
+      except errors.BlockDeviceError, err:
+        return [str(err)]
+    msgs.extend(utils.SimpleRetry([], _TryRemove,
+                                  constants.DISK_REMOVE_RETRY_INTERVAL,
+                                  constants.DISK_REMOVE_RETRY_TIMEOUT))
if not msgs:
DevCacheManager.RemoveCache(r_path)
@@ -2245,23 +2313,28 @@ def BlockdevAssemble(disk, owner, as_primary, idx):
This is a wrapper over _RecursiveAssembleBD.
@rtype: str or boolean
-  @return: a C{/dev/...} path for primary nodes, and
-      C{True} for secondary nodes
+  @return: a tuple with the C{/dev/...} path and the created symlink
+      for primary nodes, and (C{True}, C{True}) for secondary nodes
"""
try:
result = _RecursiveAssembleBD(disk, owner, as_primary)
if isinstance(result, BlockDev):
# pylint: disable=E1103
-      result = result.dev_path
+      dev_path = result.dev_path
+      link_name = None
if as_primary:
-        _SymlinkBlockDev(owner, result, idx)
+        link_name = _SymlinkBlockDev(owner, dev_path, idx)
+    elif result:
+      return result, result
+    else:
+      _Fail("Unexpected result from _RecursiveAssembleBD")
except errors.BlockDeviceError, err:
_Fail("Error while assembling disk: %s", err, exc=True)
except OSError, err:
_Fail("Error while symlinking disk: %s", err, exc=True)
-  return result
+  return dev_path, link_name
def BlockdevShutdown(disk):
@@ -4268,16 +4341,16 @@ def ConfigureOVS(ovs_name, ovs_link):
# Initialize the OpenvSwitch
result = utils.RunCmd(["ovs-vsctl", "add-br", ovs_name])
if result.failed:
_Fail("Failed to create openvswitch %s. Script return value: %s, output:"
" '%s'" % result.exit_code, result.output, log=True)
_Fail("Failed to create openvswitch. Script return value: %s, output: '%s'"
% (result.exit_code, result.output), log=True)
# And connect it to a physical interface, if given
if ovs_link:
result = utils.RunCmd(["ovs-vsctl", "add-port", ovs_name, ovs_link])
if result.failed:
_Fail("Failed to connect openvswitch to interface %s. Script return"
" value: %s, output: '%s'" % ovs_link, result.exit_code,
result.output, log=True)
" value: %s, output: '%s'" % (ovs_link, result.exit_code,
result.output), log=True)
class HooksRunner(object):
......
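The BlockdevRemove() change above retries the removal through
utils.SimpleRetry([], _TryRemove, interval, timeout): _TryRemove is polled
until it returns the expected empty error list or the timeout expires, and
msgs collects whatever the last attempt returned. A rough, runnable sketch of
those semantics (simplified signature; the real helper takes more options):

    import time

    def simple_retry(expected, fn, delay, timeout):
        """Call fn() until it returns `expected` or `timeout` elapses.

        On success the expected value (here an empty error list) comes
        back; on timeout the last result (the last error list) is returned
        instead, which is exactly what msgs.extend() consumes above.
        """
        deadline = time.time() + timeout
        while True:
            result = fn()
            if result == expected or time.time() + delay > deadline:
                return result
            time.sleep(delay)

    # BlockdevRemove-style usage: retry until Remove() reports no errors.
    attempts = iter([["device busy"], ["device busy"], []])
    print(simple_retry([], lambda: next(attempts), 0.01, 1.0))  # -> []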
@@ -614,11 +614,14 @@ def InitCluster(cluster_name, mac_prefix, # pylint: disable=R0913, R0914
raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,
errors.ECODE_INVAL)
result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
if result.failed:
raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
(master_netdev,
result.output.strip()), errors.ECODE_INVAL)
if not nicparams.get('mode', None) == "openvswitch":
# Do not do this check if mode=openvswitch, since the openvswitch is not
# created yet
result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
if result.failed:
raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
(master_netdev,
result.output.strip()), errors.ECODE_INVAL)
dirs = [(pathutils.RUN_DIR, constants.RUN_DIRS_MODE)]
utils.EnsureDirs(dirs)
......
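The InitCluster() hunk above skips the master netdev probe when the NIC mode
is openvswitch, because the OVS bridge is only created later by
LUClusterPostInit. A standalone illustration of the guard (hypothetical
helper, standard library only, Linux-only because of the `ip` call):

    import subprocess

    def check_master_netdev(master_netdev, nic_mode):
        """Probe the master netdev with `ip link show`, except in OVS mode.

        Sketch only: in OVS mode the bridge does not exist yet at
        cluster-init time, so probing it would always fail.
        """
        if nic_mode == "openvswitch":
            return  # the bridge is created after cluster init
        result = subprocess.run(["ip", "link", "show", "dev", master_netdev],
                                capture_output=True, text=True)
        if result.returncode != 0:
            raise ValueError("Invalid master netdev given (%s): '%s'"
                             % (master_netdev, result.stderr.strip()))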
@@ -95,6 +95,7 @@ __all__ = [
"GLOBAL_FILEDIR_OPT",
"HID_OS_OPT",
"GLOBAL_SHARED_FILEDIR_OPT",
"HOTPLUG_OPT",
"HVLIST_OPT",
"HVOPTS_OPT",
"HYPERVISOR_OPT",
@@ -1641,6 +1642,10 @@ INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
default=False, action="store_true",
help="Include default values")
HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
action="store_true", default=False,
help="Hotplug supported devices (NICs and Disks)")
#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
......
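Ganeti's cli_option wraps optparse, so the new HOTPLUG_OPT surfaces as a plain
boolean opts.hotplug on the parsed options (consumed by SetInstanceParams
below). An equivalent stand-alone optparse sketch of the same behavior:

    import optparse

    # Stand-alone equivalent of HOTPLUG_OPT; Ganeti wraps this in its own
    # cli_option helper, so treat the plain optparse use as illustrative.
    parser = optparse.OptionParser()
    parser.add_option("--hotplug", dest="hotplug",
                      action="store_true", default=False,
                      help="Hotplug supported devices (NICs and Disks)")

    opts, args = parser.parse_args(["--hotplug", "instance1"])
    assert opts.hotplug is True and args == ["instance1"]

    opts, _ = parser.parse_args([])
    assert opts.hotplug is False  # without the flag, a cold modify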
@@ -1317,6 +1317,14 @@ def SetInstanceParams(opts, args):
allowed_values=[constants.VALUE_DEFAULT])
nics = _ConvertNicDiskModifications(opts.nics)
for action, _, __ in nics:
if action == constants.DDM_MODIFY and opts.hotplug and not opts.force:
usertext = ("You are about to hot-modify a NIC. This will be done"
" by removing the exisiting and then adding a new one."
" Network connection might be lost. Continue?")
if not AskUser(usertext):
return 1
disks = _ParseDiskSizes(_ConvertNicDiskModifications(opts.disks))
if (opts.disk_template and
@@ -1336,6 +1344,7 @@ def SetInstanceParams(opts, args):
op = opcodes.OpInstanceSetParams(instance_name=args[0],
nics=nics,
disks=disks,
hotplug=opts.hotplug,
disk_template=opts.disk_template,
remote_node=opts.node,
pnode=opts.new_primary_node,
@@ -1543,7 +1552,7 @@ commands = {
[DISK_TEMPLATE_OPT, SINGLE_NODE_OPT, OS_OPT, FORCE_VARIANT_OPT,
OSPARAMS_OPT, DRY_RUN_OPT, PRIORITY_OPT, NWSYNC_OPT, OFFLINE_INST_OPT,
ONLINE_INST_OPT, IGNORE_IPOLICY_OPT, RUNTIME_MEM_OPT,
NOCONFLICTSCHECK_OPT, NEW_PRIMARY_OPT],
NOCONFLICTSCHECK_OPT, NEW_PRIMARY_OPT, HOTPLUG_OPT],
"<instance>", "Alters the parameters of an instance"),
"shutdown": (
GenericManyOps("shutdown", _ShutdownInstance), [ArgInstance()],
......
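The confirmation added to SetInstanceParams above exists because a NIC
hot-modify is implemented as remove-then-add, which can drop the network
connection. A minimal sketch of the prompt-unless-forced flow (function name
hypothetical):

    def confirm_hot_modify(hotplug, force, ask):
        """Prompt before a NIC hot-modify unless --force was given."""
        if hotplug and not force:
            return ask("You are about to hot-modify a NIC. Network"
                       " connection might be lost. Continue?")
        return True

    assert confirm_hot_modify(True, False, lambda q: True)       # user agrees
    assert not confirm_hot_modify(True, False, lambda q: False)  # user aborts
    assert confirm_hot_modify(True, True, lambda q: False)       # --force skips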
@@ -181,6 +181,29 @@ class LUClusterPostInit(LogicalUnit):
HPATH = "cluster-init"
HTYPE = constants.HTYPE_CLUSTER
def CheckArguments(self):
self.master_uuid = self.cfg.GetMasterNode()
self.master_ndparams = self.cfg.GetNdParams(self.cfg.GetMasterNodeInfo())
# TODO: When Issue 584 is solved, and None is properly parsed when used
# as a default value, ndparams.get(.., None) can be changed to
# ndparams[..] to access the values directly
# OpenvSwitch: Warn user if link is missing
if (self.master_ndparams[constants.ND_OVS] and not
self.master_ndparams.get(constants.ND_OVS_LINK, None)):
self.LogInfo("No physical interface for OpenvSwitch was given."
" OpenvSwitch will not have an outside connection. This"
" might not be what you want.")
# OpenvSwitch: Warn if parameters are given, but OVS is not enabled.
if (not self.master_ndparams[constants.ND_OVS] and
(self.master_ndparams[constants.ND_OVS_NAME] or
self.master_ndparams.get(constants.ND_OVS_LINK, None))):
self.LogInfo("OpenvSwitch name or link were given, but"
" OpenvSwitch is not enabled. Please enable"
" OpenvSwitch with 'ovs=true' or create it manually")
def BuildHooksEnv(self):
"""Build hooks env.
@@ -196,9 +219,15 @@ class LUClusterPostInit(LogicalUnit):
return ([], [self.cfg.GetMasterNode()])
def Exec(self, feedback_fn):
"""Nothing to do.
"""Create and configure Open vSwitch
"""
if self.master_ndparams[constants.ND_OVS]:
result = self.rpc.call_node_configure_ovs(
self.master_uuid,
self.master_ndparams[constants.ND_OVS_NAME],
self.master_ndparams.get(constants.ND_OVS_LINK, None))
result.Raise("Could not successully configure Open vSwitch")
return True
......
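The two LogInfo messages in CheckArguments above warn about inconsistent OVS
node parameters. A runnable sketch of the same checks, assuming the ND_OVS*
constants resolve to the plain keys "ovs", "ovs_name" and "ovs_link":

    def ovs_config_warnings(ndparams):
        """Reproduce the LUClusterPostInit consistency warnings (sketch)."""
        warnings = []
        if ndparams.get("ovs") and not ndparams.get("ovs_link"):
            warnings.append("OVS enabled without a physical link; the switch"
                            " will have no outside connection")
        if not ndparams.get("ovs") and (ndparams.get("ovs_name") or
                                        ndparams.get("ovs_link")):
            warnings.append("OVS name or link given, but ovs=true not set")
        return warnings

    assert ovs_config_warnings({"ovs": True, "ovs_name": "switch1"})
    assert not ovs_config_warnings({"ovs": True, "ovs_name": "switch1",
                                    "ovs_link": "eth0"})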
@@ -2227,11 +2227,13 @@ def _ApplyContainerMods(kind, container, chgdesc, mods,
if op == constants.DDM_REMOVE:
assert not params
-      if remove_fn is not None:
-        remove_fn(absidx, item, private)
      changes = [("%s/%s" % (kind, absidx), "remove")]
+      if remove_fn is not None:
+        msg = remove_fn(absidx, item, private)
+        if msg:
+          changes.append(("%s/%s" % (kind, absidx), msg))
assert container[absidx] == item
del container[absidx]
elif op == constants.DDM_MODIFY:
@@ -2806,6 +2808,7 @@ class LUInstanceSetParams(LogicalUnit):
assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
self.cluster = self.cfg.GetClusterInfo()
+    cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
@@ -2819,7 +2822,7 @@ class LUInstanceSetParams(LogicalUnit):
# verify that the instance is not up
instance_info = self.rpc.call_instance_info(
pnode_uuid, self.instance.name, self.instance.hypervisor,
-        self.instance.hvparams)
+        cluster_hvparams)
if instance_info.fail_msg:
self.warn.append("Can't get instance runtime information: %s" %
instance_info.fail_msg)
@@ -2938,9 +2941,9 @@ class LUInstanceSetParams(LogicalUnit):
mem_check_list.extend(self.instance.secondary_nodes)
instance_info = self.rpc.call_instance_info(
pnode_uuid, self.instance.name, self.instance.hypervisor,
-        self.instance.hvparams)
+        cluster_hvparams)
    hvspecs = [(self.instance.hypervisor,
-                self.cluster.hvparams[self.instance.hypervisor])]
+                cluster_hvparams)]
nodeinfo = self.rpc.call_node_info(mem_check_list, None,
hvspecs)
pninfo = nodeinfo[pnode_uuid]
@@ -3001,7 +3004,7 @@ class LUInstanceSetParams(LogicalUnit):
remote_info = self.rpc.call_instance_info(
self.instance.primary_node, self.instance.name,
self.instance.hypervisor,
-          self.cluster.hvparams[self.instance.hypervisor])
+          cluster_hvparams)
remote_info.Raise("Error checking node %s" %
self.cfg.GetNodeName(self.instance.primary_node))
if not remote_info.payload: # not running already
@@ -3062,7 +3065,8 @@ class LUInstanceSetParams(LogicalUnit):
# Operate on copies as this is still in prereq
nics = [nic.Copy() for nic in self.instance.nics]
_ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
-                        self._CreateNewNic, self._ApplyNicMods, None)
+                        self._CreateNewNic, self._ApplyNicMods,
+                        self._RemoveNic)
# Verify that NIC names are unique and valid
utils.ValidateDeviceNames("NIC", nics)
self._new_nics = nics
@@ -3232,6 +3236,22 @@ class LUInstanceSetParams(LogicalUnit):
(idx, self.cfg.GetNodeName(pnode_uuid)),
self.LogWarning)
def _HotplugDevice(self, action, dev_type, device, extra, seq):
self.LogInfo("Trying to hotplug device...")
msg = "hotplug:"
result = self.rpc.call_hotplug_device(self.instance.primary_node,
self.instance, action, dev_type,
(device, self.instance),
extra, seq)
if result.fail_msg:
self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
self.LogInfo("Continuing execution..")
msg += "failed"
else:
self.LogInfo("Hotplug done.")
msg += "done"
return msg
def _CreateNewDisk(self, idx, params, _):
"""Creates a new disk.
@@ -3257,9 +3277,26 @@ class LUInstanceSetParams(LogicalUnit):
disks=[(idx, disk, 0)],
cleanup=new_disks)
-    return (disk, [
-      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
-      ])
+    changes = [
+      ("disk/%d" % idx,
+       "add:size=%s,mode=%s" % (disk.size, disk.mode)),
+      ]
+    if self.op.hotplug:
+      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
+                                               (disk, self.instance),
+                                               self.instance.name, True, idx)
+      if result.fail_msg:
+        changes.append(("disk/%d" % idx, "assemble:failed"))
+        self.LogWarning("Can't assemble newly created disk %d: %s",
+                        idx, result.fail_msg)
+      else:
+        _, link_name = result.payload
+        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
+                                  constants.HOTPLUG_TARGET_DISK,
+                                  disk, link_name, idx)
+        changes.append(("disk/%d" % idx, msg))
+    return (disk, changes)
def _PostAddDisk(self, _, disk):
if not WaitForSync(self, self.instance, disks=[disk],
@@ -3293,6 +3330,13 @@ class LUInstanceSetParams(LogicalUnit):
"""Removes a disk.
"""
hotmsg = ""
if self.op.hotplug:
hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
constants.HOTPLUG_TARGET_DISK,
root, None, idx)
ShutdownInstanceDisks(self, self.instance, [root])
(anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
for node_uuid, disk in anno_disk.ComputeNodeTree(
self.instance.primary_node):
@@ -3307,6 +3351,8 @@ class LUInstanceSetParams(LogicalUnit):
if root.dev_type in constants.DTS_DRBD:
self.cfg.AddTcpUdpPort(root.logical_id[2])
return hotmsg
def _CreateNewNic(self, idx, params, private):
"""Creates data structure for a new network interface.
@@ -3322,13 +3368,20 @@ class LUInstanceSetParams(LogicalUnit):
nicparams=nicparams)
nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
-    return (nobj, [
-      ("nic.%d" % idx,
-       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
-       (mac, ip, private.filled[constants.NIC_MODE],
-       private.filled[constants.NIC_LINK],
-       net)),
-      ])
+    changes = [
+      ("nic.%d" % idx,
+       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
+       (mac, ip, private.filled[constants.NIC_MODE],
+        private.filled[constants.NIC_LINK], net)),
+      ]
+    if self.op.hotplug:
+      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
+                                constants.HOTPLUG_TARGET_NIC,
+                                nobj, None, idx)
+      changes.append(("nic.%d" % idx, msg))
+    return (nobj, changes)
def _ApplyNicMods(self, idx, nic, params, private):
"""Modifies a network interface.
@@ -3353,8 +3406,20 @@ class LUInstanceSetParams(LogicalUnit):
for (key, val) in nic.nicparams.items():
changes.append(("nic.%s/%d" % (key, idx), val))
if self.op.hotplug:
msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
constants.HOTPLUG_TARGET_NIC,
nic, None, idx)
changes.append(("nic/%d" % idx, msg))
return changes
def _RemoveNic(self, idx, nic, _):
if self.op.hotplug:
return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
constants.HOTPLUG_TARGET_NIC,
nic, None, idx)
def Exec(self, feedback_fn):
"""Modifies an instance.
......
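_HotplugDevice above deliberately treats a failed hotplug RPC as non-fatal:
the modify job continues, the configuration change is still recorded, and the
change log gains a "hotplug:failed" entry (the device change then only takes
effect after a reboot). A self-contained sketch of that best-effort pattern,
with a stand-in for the RPC result object:

    class FakeRpcResult(object):
        """Minimal stand-in for an RPC result, for illustration only."""
        def __init__(self, fail_msg=None):
            self.fail_msg = fail_msg

    def hotplug_best_effort(do_rpc, warn):
        """Downgrade a failed hotplug RPC to a warning plus a log entry."""
        msg = "hotplug:"
        result = do_rpc()
        if result.fail_msg:
            warn("Could not hotplug device: %s" % result.fail_msg)
            msg += "failed"
        else:
            msg += "done"
        return msg

    assert hotplug_best_effort(lambda: FakeRpcResult(), print) == "hotplug:done"
    assert hotplug_best_effort(lambda: FakeRpcResult("no QMP socket"),
                               print) == "hotplug:failed"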
@@ -1211,8 +1211,11 @@ def ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
ignored.
"""
-  lu.cfg.MarkInstanceDisksInactive(instance.uuid)
  all_result = True
+  if disks is None:
+    # only mark instance disks as inactive if all disks are affected
+    lu.cfg.MarkInstanceDisksInactive(instance.uuid)
disks = ExpandCheckDisks(instance, disks)
for disk in disks:
@@ -1240,7 +1243,7 @@ def _SafeShutdownInstanceDisks(lu, instance, disks=None):
def AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
                          ignore_size=False):
"""Prepare the block devices for an instance.
This sets up the block devices on all nodes.
@@ -1265,6 +1268,11 @@ def AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
"""
device_info = []
disks_ok = True
+  if disks is None:
+    # only mark instance disks as active if all disks are affected
+    lu.cfg.MarkInstanceDisksActive(instance.uuid)
disks = ExpandCheckDisks(instance, disks)
# With the two passes mechanism we try to reduce the window of
@@ -1276,10 +1284,6 @@ def AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
# into any other network-connected state (Connected, SyncTarget,
# SyncSource, etc.)
-  # mark instance disks as active before doing actual work, so watcher does
-  # not try to shut them down erroneously
-  lu.cfg.MarkInstanceDisksActive(instance.uuid)
# 1st pass, assemble on all nodes in secondary mode
for idx, inst_disk in enumerate(disks):
for node_uuid, node_disk in inst_disk.ComputeNodeTree(
@@ -1321,7 +1325,7 @@ def AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
inst_disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
disks_ok = False
else:
-        dev_path = result.payload
+        dev_path, _ = result.payload
device_info.append((lu.cfg.GetNodeName(instance.primary_node),
inst_disk.iv_name, dev_path))
......
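The AssembleInstanceDisks/ShutdownInstanceDisks changes above hinge on the
disks=None convention: only when the operation covers every disk of the
instance may the instance-wide active/inactive flag be flipped, otherwise the
watcher could be misled by a partial assemble or shutdown. A small sketch of
that rule (helper name hypothetical):

    def plan_disk_state_change(requested_disks, all_disks):
        """disks=None means "all disks"; only then flip the global flag."""
        flip_global_flag = requested_disks is None
        disks = list(all_disks) if flip_global_flag else requested_disks
        return flip_global_flag, disks

    # Partial operation: the instance-wide flag stays untouched.
    assert plan_disk_state_change(["disk/0"], ["disk/0", "disk/1"])[0] is False
    # Whole-instance operation: the flag may be flipped.
    flip, disks = plan_disk_state_change(None, ["disk/0", "disk/1"])
    assert flip and disks == ["disk/0", "disk/1"]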
@@ -120,26 +120,7 @@ DAEMONS_LOGFILES = \
dict((daemon, pathutils.GetLogFilename(DAEMONS_LOGBASE[daemon]))
for daemon in DAEMONS_LOGBASE)
-# Some daemons might require more than one logfile.
-# Specifically, right now only the Haskell http library "snap", used by the
-# monitoring daemon, requires multiple log files.
-# These are the only valid reasons for having an extra logfile
-EXTRA_LOGREASON_ACCESS = "access"
-EXTRA_LOGREASON_ERROR = "error"
-VALID_EXTRA_LOGREASONS = compat.UniqueFrozenset([
-  EXTRA_LOGREASON_ACCESS,
-  EXTRA_LOGREASON_ERROR,
-  ])
-# These are the extra logfiles, grouped by daemon
-DAEMONS_EXTRA_LOGBASE = {
-  MOND: {
-    EXTRA_LOGREASON_ACCESS: _constants.EXTRA_LOGREASON_ACCESS,
-    EXTRA_LOGREASON_ERROR: _constants.EXTRA_LOGREASON_ERROR,
-  }
-}
+DAEMONS_EXTRA_LOGBASE = _constants.DAEMONS_EXTRA_LOGBASE
DAEMONS_EXTRA_LOGFILES = \
dict((daemon, dict((extra,
@@ -222,14 +203,13 @@ IEIO_FILE = _constants.IEIO_FILE
IEIO_RAW_DISK = _constants.IEIO_RAW_DISK
IEIO_SCRIPT = _constants.IEIO_SCRIPT
VALUE_DEFAULT = "default"
VALUE_AUTO = "auto"
VALUE_GENERATE = "generate"
VALUE_NONE = "none"
VALUE_TRUE = "true"
VALUE_FALSE = "false"
VALUE_HS_NOTHING = {"Nothing": None}
VALUE_DEFAULT = _constants.VALUE_DEFAULT
VALUE_AUTO = _constants.VALUE_AUTO
VALUE_GENERATE = _constants.VALUE_GENERATE