Commit f39cd91d authored by Balazs Lecz

Merge branch 'devel-2.1'



Conflicts:
	doc/security.rst
	  trivial
	lib/cli.py
	  trivial
Signed-off-by: Balazs Lecz <leczb@google.com>
Reviewed-by: Michael Hanselmann <hansmi@google.com>
parents 1651d116 4e9dcb8a
......@@ -36,10 +36,10 @@ DIRS = \
doc/examples/hooks \
lib \
lib/build \
lib/confd \
lib/http \
lib/hypervisor \
lib/rapi \
lib/confd \
man \
qa \
scripts \
......@@ -57,6 +57,7 @@ maintainer-clean-local:
rm -rf doc/api doc/html doc/coverage
CLEANFILES = \
$(addsuffix /*.py[co],$(DIRS)) \
autotools/replace_vars.sed \
daemons/daemon-util \
daemons/ganeti-cleaner \
......@@ -66,15 +67,8 @@ CLEANFILES = \
doc/examples/ganeti.cron \
doc/examples/gnt-config-backup \
doc/examples/hooks/ipsec \
lib/*.py[co] \
lib/build/*.py[co] \
lib/http/*.py[co] \
lib/hypervisor/*.py[co] \
lib/rapi/*.py[co] \
$(man_MANS) \
$(manhtml) \
qa/*.py[co] \
test/*.py[co] \
stamp-directories \
stamp-srclinks \
$(nodist_pkgpython_PYTHON)
......@@ -116,6 +110,7 @@ pkgpython_PYTHON = \
lib/ssh.py \
lib/storage.py \
lib/utils.py \
lib/uidpool.py \
lib/workerpool.py
hypervisor_PYTHON = \
......@@ -142,8 +137,8 @@ http_PYTHON = \
confd_PYTHON = \
lib/confd/__init__.py \
lib/confd/client.py \
lib/confd/server.py \
lib/confd/querylib.py
lib/confd/querylib.py \
lib/confd/server.py
docrst = \
doc/admin.rst \
......@@ -350,6 +345,7 @@ python_tests = \
test/ganeti.rapi.resources_unittest.py \
test/ganeti.serializer_unittest.py \
test/ganeti.ssh_unittest.py \
test/ganeti.uidpool_unittest.py \
test/ganeti.utils_unittest.py \
test/ganeti.workerpool_unittest.py \
test/docs_unittest.py \
......@@ -444,7 +440,6 @@ man/%.7: man/%.7.in $(REPLACE_VARS_SED)
man/%.8: man/%.8.in $(REPLACE_VARS_SED)
sed -f $(REPLACE_VARS_SED) < $< > $@
$(CHECK_MAN) $@
man/%.html: man/%.html.in $(REPLACE_VARS_SED)
sed -f $(REPLACE_VARS_SED) < $< > $@
......
......@@ -15,7 +15,7 @@ Objective
=========
Ganeti 2.1 will add features to help further automation of cluster
operations, further improbe scalability to even bigger clusters, and
operations, further improve scalability to even bigger clusters, and
make it easier to debug the Ganeti core.
Background
......@@ -725,6 +725,177 @@ evacuate`` code and run replace-secondary with an iallocator script for
all instances on the node.
User-id pool
~~~~~~~~~~~~
In order to allow running different processes under unique user-ids
on a node, we introduce the user-id pool concept.
The user-id pool is a cluster-wide configuration parameter.
It is a list of user-ids and/or user-id ranges that are reserved
for running Ganeti processes (including KVM instances).
The code guarantees that on a given node a given user-id is only
handed out if there is no other process running with that user-id.
Please note that this can only be guaranteed if all processes in
the system that run under a user-id belonging to the pool are
started by reserving a user-id first. That can be accomplished
either by using the ``RequestUnusedUid()`` function to get an unused
user-id or by implementing the same locking mechanism.
Implementation
++++++++++++++
The functions that are specific to the user-id pool feature are located
in a separate module: ``lib/uidpool.py``.
Storage
^^^^^^^
The user-id pool is a single cluster parameter. It is stored in the
*Cluster* object under the ``uid_pool`` name as a list of integer
tuples. These tuples represent the boundaries of user-id ranges.
For single user-ids, the boundaries are equal.
The internal user-id pool representation is converted into a
string: a newline-separated list of user-ids or user-id ranges.
This string representation is distributed to all the nodes via the
*ssconf* mechanism. This means that the user-id pool can be
accessed in a read-only way on any node without consulting the master
node or master candidate nodes.
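As an illustration of the two representations, here is a minimal
sketch (``FormatUidPool`` is the helper the ssconf export uses, as can
be seen in the ``lib/config.py`` hunk below; the local function here
only illustrates its expected behaviour)::

  # Internal form: a list of (lower, upper) integer tuples; the
  # boundaries are inclusive and equal for single user-ids.
  uid_pool = [(2000, 2009), (3042, 3042)]

  def format_uid_pool(uid_pool, separator="\n"):
    """Sketch of converting the internal form into the ssconf string."""
    formatted = []
    for lower, upper in uid_pool:
      if lower == upper:
        formatted.append(str(lower))
      else:
        formatted.append("%s-%s" % (lower, upper))
    return separator.join(formatted)

  format_uid_pool(uid_pool)  # "2000-2009\n3042"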
Initial value
^^^^^^^^^^^^^
The value of the user-id pool cluster parameter can be initialized
at cluster initialization time using the
``gnt-cluster init --uid-pool <uid-pool definition> ...``
command.
As there is no sensible default value for the user-id pool parameter,
it is initialized to an empty list if no ``--uid-pool`` option is
supplied at cluster init time.
If the user-id pool is empty, the user-id pool feature is considered
to be disabled.
Manipulation
^^^^^^^^^^^^
The user-id pool cluster parameter can be modified from the
command-line with the following commands:
- ``gnt-cluster modify --uid-pool <uid-pool definition>``
- ``gnt-cluster modify --add-uids <uid-pool definition>``
- ``gnt-cluster modify --remove-uids <uid-pool definition>``
The ``--uid-pool`` option overwrites the current setting with the
supplied ``<uid-pool definition>``, while
``--add-uids``/``--remove-uids`` add/remove the listed uids or
uid ranges to/from the pool.

The ``<uid-pool definition>`` should be a comma-separated list of
user-ids or user-id ranges. A range is defined by a lower and an
upper boundary, separated by a dash; both boundaries are inclusive.
The ``<uid-pool definition>`` is parsed into the internal
representation, sanity-checked and stored in the ``uid_pool``
attribute of the *Cluster* object.
It is also immediately converted into a string (formatted in the
input format) and distributed to all nodes via the *ssconf* mechanism.
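A minimal sketch of how such a definition can be parsed into the
internal representation (the real entry point is
``uidpool.ParseUidPool``, used in the code examples below; this
standalone helper and its error handling are only an illustration)::

  def parse_uid_pool(value, separator=","):
    """Sketch of parsing a <uid-pool definition> into (lower, upper) tuples."""
    ranges = []
    for item in value.split(separator):
      item = item.strip()
      if not item:
        continue
      if "-" in item:
        lower, upper = item.split("-", 1)
      else:
        lower = upper = item
      lower, upper = int(lower), int(upper)
      if lower > upper:
        raise ValueError("Invalid user-id range: %s" % item)
      ranges.append((lower, upper))
    return ranges

  parse_uid_pool("2000-2009,3042")  # [(2000, 2009), (3042, 3042)]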
Inspection
^^^^^^^^^^
The current value of the user-id pool cluster parameter is printed
by the ``gnt-cluster info`` command.
The output format is accepted by the ``gnt-cluster modify --uid-pool``
command.
Locking
^^^^^^^
The ``uidpool.py`` module provides a function (``RequestUnusedUid``)
for requesting an unused user-id from the pool.
This will try to find a random user-id that is not currently in use.
The algorithm is the following:
1) Randomize the list of user-ids in the user-id pool
2) Iterate over this randomized UID list
3) Create a lock file (it doesn't matter if it already exists)
4) Acquire an exclusive POSIX lock on the file, to provide mutual
   exclusion for the following non-atomic operations
5) Check if there is a process in the system with the given UID
6) If there isn't, return the UID, otherwise unlock the file and
   continue the iteration over the user-ids
The user can then start a new process with this user-id.
Once a process is successfully started, the exclusive POSIX lock can
be released, but the lock file will remain in the filesystem.
The presence of such a lock file means that the given user-id is most
probably in use. The lack of a uid lock file does not guarantee that
there are no processes with that user-id.
After acquiring the exclusive POSIX lock, ``RequestUnusedUid``
always performs a check to see if there is a process running with the
given uid.
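A condensed sketch of this lock-and-check pattern is shown below. It
assumes the lock files live under the new ``UIDPOOL_LOCKDIR`` and that
the process check shells out to ``pgrep -u`` (in line with the
``PGREP`` constant added in this change); the authoritative
implementation is ``RequestUnusedUid`` in ``lib/uidpool.py``, which
returns an object whose ``Unlock()`` method releases the lock, whereas
this sketch simply returns the raw file descriptor::

  import fcntl
  import os
  import random
  import subprocess

  UIDPOOL_LOCKDIR = "/var/run/ganeti/uid-pool"  # constants.UIDPOOL_LOCKDIR

  def request_unused_uid(all_uids):
    """Sketch of the RequestUnusedUid algorithm described above."""
    uids = list(all_uids)
    random.shuffle(uids)                      # step 1: randomize the pool
    for uid in uids:                          # step 2: iterate over it
      # step 3: create the lock file (fine if it already exists)
      fd = os.open(os.path.join(UIDPOOL_LOCKDIR, str(uid)),
                   os.O_CREAT | os.O_RDWR, 0600)
      try:
        # step 4: take an exclusive, non-blocking lock on the file
        fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
      except IOError:
        os.close(fd)
        continue
      # step 5: pgrep -u exits with 0 iff a process with this uid exists
      devnull = open(os.devnull, "w")
      in_use = subprocess.call(["pgrep", "-u", str(uid)],
                               stdout=devnull) == 0
      devnull.close()
      if not in_use:
        return uid, fd                        # step 6: hand out the uid
      fcntl.flock(fd, fcntl.LOCK_UN)          # uid taken: unlock, go on
      os.close(fd)
    raise RuntimeError("No unused user-id available in the pool")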
A user-id can be returned to the pool by calling the
``ReleaseUid`` function. This will remove the corresponding lock file.
Note that it doesn't check if there is any process still running
with that user-id. The removal of the lock file only means that there
are most probably no processes with the given user-id. This helps
in speeding up the process of finding a user-id that is guaranteed to
be unused.
There is a convenience function, ``ExecWithUnusedUid``, that
wraps the execution of a function (or any callable) that requires a
unique user-id. ``ExecWithUnusedUid`` takes care of requesting an
unused user-id and unlocking the lock file. It also automatically
returns the user-id to the pool if the callable raises an exception.
Code examples
+++++++++++++
Requesting a user-id from the pool:
::
  from ganeti import ssconf
  from ganeti import uidpool

  # Get list of all user-ids in the uid-pool from ssconf
  ss = ssconf.SimpleStore()
  uid_pool = uidpool.ParseUidPool(ss.GetUidPool(), separator="\n")
  all_uids = set(uidpool.ExpandUidPool(uid_pool))

  uid = uidpool.RequestUnusedUid(all_uids)
  try:
    <start a process with the UID>
    # Once the process is started, we can release the file lock
    uid.Unlock()
  except ..., err:
    # Return the UID to the pool
    uidpool.ReleaseUid(uid)
Releasing a user-id:
::
  from ganeti import uidpool

  uid = <get the UID the process is running under>
  <stop the process>
  uidpool.ReleaseUid(uid)
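A sketch of the ``ExecWithUnusedUid`` convenience wrapper in use (the
exact calling convention is an assumption based on the description
above; ``all_uids`` is the set built in the first example)::

  from ganeti import uidpool

  def start_instance_process(uid):
    # <start a process under the given user-id>
    pass

  # Assumed call pattern: request an unused uid, invoke the callable
  # with it, release the file lock on success, and return the uid to
  # the pool (ReleaseUid) if the callable raises an exception.
  uidpool.ExecWithUnusedUid(start_instance_process, all_uids)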
External interface changes
--------------------------
......
......@@ -109,6 +109,48 @@ of other clusters. With Ganeti 2.2, clusters can exchange data if tokens
(an encryption certificate) were exchanged by a trusted third party
before.
KVM Security
------------
When running KVM instances under Ganeti, three security models are
available: 'none', 'user' and 'pool'.
Under security model 'none', instances run by default as root. This
means that, if an instance gets jailbroken, it will be able to own the
host node, and thus the Ganeti cluster. This is the default model, and
the only one available before Ganeti 2.1.2.
Under security model 'user', an instance is run as the user specified
by the hypervisor parameter 'security_domain'. This makes it easy to
run all instances as non-privileged users, and allows specific users
to be manually allocated to specific instances or sets of instances.
If the specified user has no special permissions, a jailbroken
instance will need some local privilege escalation before being able
to take over the node and the cluster. It is possible, though, for a
jailbroken instance to affect other instances running under the same
user.
Under security model 'pool', a global cluster-level uid pool is used
to start each instance on the same node under a different user. The
uids in the cluster pool can be set with ``gnt-cluster init`` and
``gnt-cluster modify``, and must correspond to existing users on all
nodes. Ganeti will then allocate one uid to each instance, as needed.
This way a jailbroken instance won't be able to affect any other
instance. Since the users are handed out by Ganeti in a per-node,
randomized way, in this mode there is no way to make sure a particular
instance is always run as a certain user; use model 'user' for that.
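As an illustration, a pool could be configured and selected like this
(the uid range is only an example; ``security_model`` is the KVM
hypervisor parameter that selects between the models described above,
alongside 'security_domain')::

  # Reserve 20 user-ids for Ganeti and switch KVM to the 'pool' model
  gnt-cluster modify --uid-pool 4000-4019
  gnt-cluster modify -H kvm:security_model=pool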
In addition to these precautions, if you want to avoid instances
sending traffic on your node network, you can use iptables rules such
as::

  iptables -A OUTPUT -m owner --uid-owner <uid>[-<uid>] -j LOG \
    --log-prefix "ganeti uid pool user network traffic"
  iptables -A OUTPUT -m owner --uid-owner <uid>[-<uid>] -j DROP
This won't affect regular instance traffic (which comes out of the
tapX interface allocated to the instance, and can be filtered or made
subject to appropriate policy routes) but will stop any user-generated
traffic that might come from a jailbroken instance.
.. vim: set textwidth=72 :
.. Local Variables:
.. mode: rst
......
......@@ -1109,6 +1109,11 @@ def InstanceShutdown(instance, timeout):
if iname in hyper.ListInstances():
_Fail("Could not shutdown instance %s even by destroy", iname)
try:
hyper.CleanupInstance(instance.name)
except errors.HypervisorError, err:
logging.warning("Failed to execute post-shutdown cleanup step: %s", err)
_RemoveBlockDevLinks(iname, instance.disks)
......
......@@ -217,7 +217,8 @@ def InitCluster(cluster_name, mac_prefix,
secondary_ip=None, vg_name=None, beparams=None,
nicparams=None, hvparams=None, enabled_hypervisors=None,
modify_etc_hosts=True, modify_ssh_setup=True,
maintain_node_health=False):
maintain_node_health=False,
uid_pool=None):
"""Initialise the cluster.
@type candidate_pool_size: int
......@@ -340,6 +341,7 @@ def InitCluster(cluster_name, mac_prefix,
candidate_pool_size=candidate_pool_size,
modify_etc_hosts=modify_etc_hosts,
modify_ssh_setup=modify_ssh_setup,
uid_pool=uid_pool,
ctime=now,
mtime=now,
uuid=utils.NewUUID(),
......
......@@ -44,6 +44,7 @@ from optparse import (OptionParser, TitledHelpFormatter,
__all__ = [
# Command line options
"ADD_UIDS_OPT",
"ALLOCATABLE_OPT",
"ALL_OPT",
"AUTO_PROMOTE_OPT",
......@@ -115,6 +116,7 @@ __all__ = [
"READD_OPT",
"REBOOT_TYPE_OPT",
"REMOVE_INSTANCE_OPT",
"REMOVE_UIDS_OPT",
"SECONDARY_IP_OPT",
"SELECT_OS_OPT",
"SEP_OPT",
......@@ -128,6 +130,7 @@ __all__ = [
"SYNC_OPT",
"TAG_SRC_OPT",
"TIMEOUT_OPT",
"UIDPOOL_OPT",
"USEUNITS_OPT",
"USE_REPL_NET_OPT",
"VERBOSE_OPT",
......@@ -956,6 +959,23 @@ IDENTIFY_DEFAULTS_OPT = \
" the current cluster defaults and set them as such, instead"
" of marking them as overridden")
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
action="store", dest="uid_pool",
help=("A list of user-ids or user-id"
" ranges separated by commas"))
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
action="store", dest="add_uids",
help=("A list of user-ids or user-id"
" ranges separated by commas, to be"
" added to the user-id pool"))
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
action="store", dest="remove_uids",
help=("A list of user-ids or user-id"
" ranges separated by commas, to be"
" removed from the user-id pool"))
def _ParseArgs(argv, commands, aliases):
"""Parser for the command line arguments.
......
......@@ -44,6 +44,7 @@ from ganeti import constants
from ganeti import objects
from ganeti import serializer
from ganeti import ssconf
from ganeti import uidpool
class LogicalUnit(object):
......@@ -2265,8 +2266,18 @@ class LUSetClusterParams(LogicalUnit):
if self.op.candidate_pool_size < 1:
raise errors.OpPrereqError("At least one master candidate needed",
errors.ECODE_INVAL)
_CheckBooleanOpField(self.op, "maintain_node_health")
if self.op.uid_pool:
uidpool.CheckUidPool(self.op.uid_pool)
if self.op.add_uids:
uidpool.CheckUidPool(self.op.add_uids)
if self.op.remove_uids:
uidpool.CheckUidPool(self.op.remove_uids)
def ExpandNames(self):
# FIXME: in the future maybe other cluster params won't require checking on
# all nodes to be modified.
......@@ -2358,7 +2369,7 @@ class LUSetClusterParams(LogicalUnit):
"\n".join(nic_errors))
# hypervisor list/parameters
self.new_hvparams = objects.FillDict(cluster.hvparams, {})
self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
if self.op.hvparams:
if not isinstance(self.op.hvparams, dict):
raise errors.OpPrereqError("Invalid 'hvparams' parameter on input",
......@@ -2388,6 +2399,7 @@ class LUSetClusterParams(LogicalUnit):
else:
self.new_os_hvp[os_name][hv_name].update(hv_dict)
# changes to the hypervisor list
if self.op.enabled_hypervisors is not None:
self.hv_list = self.op.enabled_hypervisors
if not self.hv_list:
......@@ -2400,6 +2412,16 @@ class LUSetClusterParams(LogicalUnit):
" entries: %s" %
utils.CommaJoin(invalid_hvs),
errors.ECODE_INVAL)
for hv in self.hv_list:
# if the hypervisor doesn't already exist in the cluster
# hvparams, we initialize it to empty, and then (in both
# cases) we make sure to fill the defaults, as we might not
# have a complete defaults list if the hypervisor wasn't
# enabled before
if hv not in new_hvp:
new_hvp[hv] = {}
new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)
else:
self.hv_list = cluster.enabled_hypervisors
......@@ -2447,6 +2469,7 @@ class LUSetClusterParams(LogicalUnit):
if self.op.os_hvp:
self.cluster.os_hvp = self.new_os_hvp
if self.op.enabled_hypervisors is not None:
self.cluster.hvparams = self.new_hvparams
self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
if self.op.beparams:
self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
......@@ -2461,6 +2484,15 @@ class LUSetClusterParams(LogicalUnit):
if self.op.maintain_node_health is not None:
self.cluster.maintain_node_health = self.op.maintain_node_health
if self.op.add_uids is not None:
uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)
if self.op.remove_uids is not None:
uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)
if self.op.uid_pool is not None:
self.cluster.uid_pool = self.op.uid_pool
self.cfg.Update(self.cluster, feedback_fn)
......@@ -3298,15 +3330,19 @@ class LUAddNode(LogicalUnit):
raise errors.OpPrereqError("Node %s is not in the configuration" % node,
errors.ECODE_NOENT)
self.changed_primary_ip = False
for existing_node_name in node_list:
existing_node = cfg.GetNodeInfo(existing_node_name)
if self.op.readd and node == existing_node_name:
if (existing_node.primary_ip != primary_ip or
existing_node.secondary_ip != secondary_ip):
if existing_node.secondary_ip != secondary_ip:
raise errors.OpPrereqError("Readded node doesn't have the same IP"
" address configuration as before",
errors.ECODE_INVAL)
if existing_node.primary_ip != primary_ip:
self.changed_primary_ip = True
continue
if (existing_node.primary_ip == primary_ip or
......@@ -3378,6 +3414,8 @@ class LUAddNode(LogicalUnit):
self.LogInfo("Readding a node, the offline/drained flags were reset")
# if we demote the node, we do cleanup later in the procedure
new_node.master_candidate = self.master_candidate
if self.changed_primary_ip:
new_node.primary_ip = self.op.primary_ip
# notify the user about any possible mc promotion
if new_node.master_candidate:
......@@ -3719,6 +3757,7 @@ class LUQueryClusterInfo(NoHooksLU):
"mtime": cluster.mtime,
"uuid": cluster.uuid,
"tags": list(cluster.GetTags()),
"uid_pool": cluster.uid_pool,
}
return result
......
......@@ -43,6 +43,7 @@ from ganeti import constants
from ganeti import rpc
from ganeti import objects
from ganeti import serializer
from ganeti import uidpool
_config_lock = locking.SharedLock()
......@@ -363,6 +364,11 @@ class ConfigWriter:
if invalid_hvs:
result.append("enabled hypervisors contains invalid entries: %s" %
invalid_hvs)
missing_hvp = (set(data.cluster.enabled_hypervisors) -
set(data.cluster.hvparams.keys()))
if missing_hvp:
result.append("hypervisor parameters missing for the enabled"
" hypervisor(s) %s" % utils.CommaJoin(missing_hvp))
if data.cluster.master_node not in data.nodes:
result.append("cluster has invalid primary node '%s'" %
......@@ -1372,6 +1378,8 @@ class ConfigWriter:
hypervisor_list = fn(cluster.enabled_hypervisors)
uid_pool = uidpool.FormatUidPool(cluster.uid_pool, separator="\n")
return {
constants.SS_CLUSTER_NAME: cluster.cluster_name,
constants.SS_CLUSTER_TAGS: cluster_tags,
......@@ -1390,6 +1398,7 @@ class ConfigWriter:
constants.SS_RELEASE_VERSION: constants.RELEASE_VERSION,
constants.SS_HYPERVISOR_LIST: hypervisor_list,
constants.SS_MAINTAIN_NODE_HEALTH: str(cluster.maintain_node_health),
constants.SS_UID_POOL: uid_pool,
}
@locking.ssynchronized(_config_lock, shared=1)
......
......@@ -100,6 +100,9 @@ IMPORT_EXPORT_DIR_MODE = 0755
SUB_RUN_DIRS = [ RUN_GANETI_DIR, BDEV_CACHE_DIR, DISK_LINKS_DIR ]
LOCK_DIR = _autoconf.LOCALSTATEDIR + "/lock"
SSCONF_LOCK_FILE = LOCK_DIR + "/ganeti-ssconf.lock"
# User-id pool lock directory
# The user-ids that are in use have a corresponding lock file in this directory
UIDPOOL_LOCKDIR = RUN_GANETI_DIR + "/uid-pool"
CLUSTER_CONF_FILE = DATA_DIR + "/config.data"
NODED_CERT_FILE = DATA_DIR + "/server.pem"
RAPI_CERT_FILE = DATA_DIR + "/rapi.pem"
......@@ -694,6 +697,7 @@ SS_INSTANCE_LIST = "instance_list"
SS_RELEASE_VERSION = "release_version"
SS_HYPERVISOR_LIST = "hypervisor_list"
SS_MAINTAIN_NODE_HEALTH = "maintain_node_health"
SS_UID_POOL = "uid_pool"
# cluster wide default parameters
DEFAULT_ENABLED_HYPERVISOR = HT_XEN_PVM
......@@ -861,3 +865,10 @@ CONFD_CLIENT_EXPIRE_TIMEOUT = 10
# (assuming we can't use jumbo frames)
# We just set this to 60K, which should be enough
MAX_UDP_DATA_SIZE = 61440
# User-id pool minimum/maximum acceptable user-ids.
UIDPOOL_UID_MIN = 0
UIDPOOL_UID_MAX = 2**32-1 # Assuming 32 bit user-ids
# Name or path of the pgrep command
PGREP = "pgrep"
......@@ -130,6 +130,18 @@ class BaseHypervisor(object):
"""
raise NotImplementedError
def CleanupInstance(self, instance_name):
"""Cleanup after a stopped instance
This is an optional method, used by hypervisors that need to cleanup after
an instance has been stopped.
@type instance_name: string
@param instance_name: instance name to cleanup after
"""
pass
def RebootInstance(self, instance):
"""Reboot an instance."""
raise NotImplementedError
......
......@@ -37,6 +37,8 @@ from ganeti import constants
from ganeti import errors
from ganeti import serializer
from ganeti import objects
from ganeti import uidpool
from ganeti import ssconf
from ganeti.hypervisor import hv_base
......@@ -45,9 +47,10 @@ class KVMHypervisor(hv_base.BaseHypervisor):
_ROOT_DIR = constants.RUN_GANETI_DIR + "/kvm-hypervisor"
_PIDS_DIR = _ROOT_DIR + "/pid" # contains live instances pids
_UIDS_DIR = _ROOT_DIR + "/uid" # contains instances reserved uids
_CTRL_DIR = _ROOT_DIR + "/ctrl" # contains instances control sockets
_CONF_DIR = _ROOT_DIR + "/conf" # contains instances startup data
_DIRS = [_ROOT_DIR, _PIDS_DIR, _CTRL_DIR, _CONF_DIR]
_DIRS = [_ROOT_DIR, _PIDS_DIR, _UIDS_DIR, _CTRL_DIR, _CONF_DIR]
PARAMETERS = {
constants.HV_KERNEL_PATH: hv_base.OPT_FILE_CHECK,
......@@ -109,6 +112,13 @@ class KVMHypervisor(hv_base.BaseHypervisor):
"""
return utils.PathJoin(cls._PIDS_DIR, instance_name)
@classmethod
def _InstanceUidFile(cls, instance_name):
"""Returns the instance uidfile.
"""
return utils.PathJoin(cls._UIDS_DIR, instance_name)
@classmethod
def _InstancePidInfo(cls, pid):
"""Check pid file for instance information.
......@@ -217,6 +227,22 @@ class KVMHypervisor(hv_base.BaseHypervisor):
"""
return utils.PathJoin(cls._CONF_DIR, "%s.runtime" % instance_name)
@classmethod
def _TryReadUidFile(cls, uid_file):
"""Try to read a uid file
"""
if os.path.exists(uid_file):
try:
uid = int(utils.ReadFile(uid_file))
except EnvironmentError:
logging.warning("Can't read uid file", exc_info=True)
return None
except (TypeError, ValueError):
logging.warning("Can't parse uid file contents", exc_info=True)
return None
return uid
@classmethod
def _RemoveInstanceRuntimeFiles(cls, pidfile, instance_name):