From 868eab677945f2afa231ee984017d9c0ac95ace5 Mon Sep 17 00:00:00 2001
From: Constantinos Venetsanopoulos <cven@grnet.gr>
Date: Mon, 12 Mar 2012 17:49:18 +0200
Subject: [PATCH] Implement the External Storage Interface

With this commit we introduce the External Storage Interface to
Ganeti, abbreviated: ExtStorage Interface.

The ExtStorage Interface provides Ganeti with the ability to interact
with externally connected shared storage pools, visible to all
VM-capable nodes. This means that Ganeti is able to handle VM disks
that reside inside a NAS/SAN or any distributed block storage
provider.

The ExtStorage Interface provides a clear API, heavily inspired by the
gnt-os-interface API, that can be used by storage vendors or sysadmins
to write simple ExtStorage Providers (analogous to gnt-os-interface's
OS Definitions). These Providers glue externally attached shared
storage to Ganeti, without the need for preprovisioned block devices
on the Ganeti VM-capable nodes, as the current `blockdev' disk
template requires.

To do so, we implement a new disk template called `ext' (of type
DTS_EXT_MIRROR) that passes control to externally provided scripts
(the ExtStorage Provider) for the template's basic functions:

  create / attach / detach / remove / grow

The scripts reside under ES_SEARCH_PATH (analogous to OS_SEARCH_PATH)
and currently only one ExtStorage Provider, called `ext', is
supported. The disk's logical id is the tuple ('ext', UUID.ext.diskX),
where UUID is generated as in disk template `plain' and X is the
disk's index.

Signed-off-by: Constantinos Venetsanopoulos <cven@grnet.gr>
---
 Makefile.am               |   1 +
 configure.ac              |  12 ++
 lib/bdev.py               | 304 ++++++++++++++++++++++++++++++++++++++
 lib/client/gnt_cluster.py |   2 +
 lib/cmdlib.py             |  17 ++-
 lib/constants.py          |  42 +++++-
 lib/objects.py            |  20 ++-
 tools/burnin              |   1 +
 8 files changed, 388 insertions(+), 11 deletions(-)

diff --git a/Makefile.am b/Makefile.am
index 2e2b92f0c..dca6ef615 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1112,6 +1112,7 @@ lib/_autoconf.py: Makefile | lib/.dir
 	  echo "SSH_CONFIG_DIR = '$(SSH_CONFIG_DIR)'"; \
 	  echo "EXPORT_DIR = '$(EXPORT_DIR)'"; \
 	  echo "OS_SEARCH_PATH = [$(OS_SEARCH_PATH)]"; \
+	  echo "ES_SEARCH_PATH = [$(ES_SEARCH_PATH)]"; \
 	  echo "XEN_BOOTLOADER = '$(XEN_BOOTLOADER)'"; \
 	  echo "XEN_KERNEL = '$(XEN_KERNEL)'"; \
 	  echo "XEN_INITRD = '$(XEN_INITRD)'"; \
diff --git a/configure.ac b/configure.ac
index ebc00b36c..b3f31f857 100644
--- a/configure.ac
+++ b/configure.ac
@@ -60,6 +60,18 @@ AC_ARG_WITH([os-search-path],
   [os_search_path="'/srv/ganeti/os'"])
 AC_SUBST(OS_SEARCH_PATH, $os_search_path)
 
+# --with-extstorage-search-path=...
+# same black sed magic for quoting of the strings in the list
+AC_ARG_WITH([extstorage-search-path],
+  [AS_HELP_STRING([--with-extstorage-search-path=LIST],
+    [comma separated list of directories to]
+    [ search for External Storage Providers]
+    [ (default is /srv/ganeti/extstorage)]
+  )],
+  [es_search_path=`echo -n "$withval" | sed -e "s/\([[^,]]*\)/'\1'/g"`],
+  [es_search_path="'/srv/ganeti/extstorage'"])
+AC_SUBST(ES_SEARCH_PATH, $es_search_path)
+
 # --with-iallocator-search-path=...
 # do a bit of black sed magic to for quoting of the strings in the list
 AC_ARG_WITH([iallocator-search-path],
diff --git a/lib/bdev.py b/lib/bdev.py
index 1d8586793..17b0faee0 100644
--- a/lib/bdev.py
+++ b/lib/bdev.py
@@ -2637,11 +2637,315 @@ class RADOSBlockDevice(BlockDev):
                   result.fail_reason, result.output)
 
 
+class ExtStorageDevice(BlockDev):
+  """A block device provided by an ExtStorage Provider.
+
+  This class implements the External Storage Interface, which means
+  handling of the externally provided block devices.
+
+  """
+  def __init__(self, unique_id, children, size, params):
+    """Attaches to an extstorage block device.
+
+    """
+    super(ExtStorageDevice, self).__init__(unique_id, children, size, params)
+    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
+      raise ValueError("Invalid configuration data %s" % str(unique_id))
+
+    self.driver, self.vol_name = unique_id
+
+    self.major = self.minor = None
+    self.Attach()
+
+  @classmethod
+  def Create(cls, unique_id, children, size, params):
+    """Create a new extstorage device.
+
+    Provision a new volume using an extstorage provider, which will
+    then be mapped to a block device.
+
+    """
+    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
+      raise errors.ProgrammerError("Invalid configuration data %s" %
+                                   str(unique_id))
+
+    # Call the External Storage's create script,
+    # to provision a new Volume inside the External Storage
+    _ExtStorageAction(constants.ES_ACTION_CREATE, unique_id, str(size))
+
+    return ExtStorageDevice(unique_id, children, size, params)
+
+  def Remove(self):
+    """Remove the extstorage device.
+
+    """
+    if not self.minor and not self.Attach():
+      # The extstorage device doesn't exist.
+      return
+
+    # First shutdown the device (remove mappings).
+    self.Shutdown()
+
+    # Call the External Storage's remove script,
+    # to remove the Volume from the External Storage
+    _ExtStorageAction(constants.ES_ACTION_REMOVE, self.unique_id)
+
+  def Rename(self, new_id):
+    """Rename this device.
+
+    """
+    pass
+
+  def Attach(self):
+    """Attach to an existing extstorage device.
+
+    This method maps the extstorage volume that matches our name with
+    a corresponding block device and then attaches to this device.
+
+    """
+    self.attached = False
+
+    # Call the External Storage's attach script,
+    # to attach an existing Volume to a block device under /dev
+    self.dev_path = _ExtStorageAction(constants.ES_ACTION_ATTACH,
+                                      self.unique_id)
+
+    try:
+      st = os.stat(self.dev_path)
+    except OSError, err:
+      logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
+      return False
+
+    if not stat.S_ISBLK(st.st_mode):
+      logging.error("%s is not a block device", self.dev_path)
+      return False
+
+    self.major = os.major(st.st_rdev)
+    self.minor = os.minor(st.st_rdev)
+    self.attached = True
+
+    return True
+
+  def Assemble(self):
+    """Assemble the device.
+
+    """
+    pass
+
+  def Shutdown(self):
+    """Shutdown the device.
+
+    """
+    if not self.minor and not self.Attach():
+      # The extstorage device doesn't exist.
+      return
+
+    # Call the External Storage's detach script,
+    # to detach an existing Volume from its block device under /dev
+    _ExtStorageAction(constants.ES_ACTION_DETACH, self.unique_id)
+
+    self.minor = None
+    self.dev_path = None
+
+  def Open(self, force=False):
+    """Make the device ready for I/O.
+
+    """
+    pass
+
+  def Close(self):
+    """Notifies that the device will no longer be used for I/O.
+
+    """
+    pass
+
+  def Grow(self, amount, dryrun):
+    """Grow the Volume.
+
+    @type amount: integer
+    @param amount: the amount (in mebibytes) to grow with
+    @type dryrun: boolean
+    @param dryrun: whether to execute the operation in simulation mode
+        only, without actually increasing the size
+
+    """
+    if not self.Attach():
+      _ThrowError("Can't attach to extstorage device during Grow()")
+
+    if dryrun:
+      # we do not support dry runs of resize operations for now
+      return
+
+    new_size = self.size + amount
+
+    # Call the External Storage's grow script,
+    # to grow an existing Volume inside the External Storage
+    _ExtStorageAction(constants.ES_ACTION_GROW, self.unique_id,
+                      str(self.size), grow=str(new_size))
+
+
+def _ExtStorageAction(action, unique_id, size=None, grow=None):
+  """Take an External Storage action.
+
+  Take an External Storage action concerning or affecting
+  a specific Volume inside the External Storage.
+
+  @type action: string
+  @param action: which action to perform. One of:
+                 create / remove / grow / attach / detach
+  @type unique_id: tuple (driver, vol_name)
+  @param unique_id: a tuple containing the type of ExtStorage (driver)
+                    and the Volume name
+  @type size: integer
+  @param size: the size of the Volume in mebibytes
+  @type grow: integer
+  @param grow: the new size in mebibytes (after grow)
+  @rtype: None or a block device path (during attach)
+
+  """
+  driver, vol_name = unique_id
+
+  # Create an External Storage instance of type `driver'
+  status, inst_es = ExtStorageFromDisk(driver)
+  if not status:
+    _ThrowError("%s" % inst_es)
+
+  # Create the basic environment for the driver's scripts
+  create_env = _ExtStorageEnvironment(unique_id, size, grow)
+
+  # Do not use a log file for action `attach' as we need
+  # to get the output from RunResult
+  # TODO: find a way to have a log file for attach too
+  logfile = None
+  if action != constants.ES_ACTION_ATTACH:
+    logfile = _VolumeLogName(action, driver, vol_name)
+
+  # Find out which external script to run according to the given action
+  script_name = action + "_script"
+  script = getattr(inst_es, script_name)
+
+  # Run the external script
+  result = utils.RunCmd([script], env=create_env,
+                        cwd=inst_es.path, output=logfile)
+  if result.failed:
+    logging.error("External storage's %s command '%s' returned"
+                  " error: %s, logfile: %s, output: %s",
+                  action, result.cmd, result.fail_reason,
+                  logfile, result.output)
+    lines = [utils.SafeEncode(val)
+             for val in utils.TailFile(logfile, lines=20)]
+    _ThrowError("External storage's %s script failed (%s), last"
+                " lines in the log file:\n%s",
+                action, result.fail_reason, "\n".join(lines))
+
+  if action == constants.ES_ACTION_ATTACH:
+    return result.stdout
+
+
+def ExtStorageFromDisk(name, base_dir=None):
+  """Create an ExtStorage instance from disk.
+
+  This function will return an ExtStorage instance
+  if the given name is a valid ExtStorage name.
+
+  @type base_dir: string
+  @keyword base_dir: Base directory containing ExtStorage installations.
+                     Defaults to a search in all the ES_SEARCH_PATH dirs.
+  @rtype: tuple
+  @return: True and the ExtStorage instance if we find a valid one, or
+           False and the diagnose message on error
+
+  """
+  if base_dir is None:
+    es_dir = utils.FindFile(name, constants.ES_SEARCH_PATH, os.path.isdir)
+  else:
+    es_dir = utils.FindFile(name, [base_dir], os.path.isdir)
+
+  if es_dir is None:
+    return False, ("Directory for External Storage Provider %s not"
+                   " found in search path" % name)
+
+  # ES Files dictionary, we will populate it with the absolute path
+  # names; if the value is True, then it is a required file, otherwise
+  # an optional one
+  es_files = dict.fromkeys(constants.ES_SCRIPTS, True)
+
+  for filename in es_files:
+    es_files[filename] = utils.PathJoin(es_dir, filename)
+
+    try:
+      st = os.stat(es_files[filename])
+    except EnvironmentError, err:
+      return False, ("File '%s' under path '%s' is missing (%s)" %
+                     (filename, es_dir, utils.ErrnoOrStr(err)))
+
+    if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
+      return False, ("File '%s' under path '%s' is not a regular file" %
+                     (filename, es_dir))
+
+    if filename in constants.ES_SCRIPTS:
+      if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
+        return False, ("File '%s' under path '%s' is not executable" %
+                       (filename, es_dir))
+
+  es_obj = \
+    objects.ExtStorage(name=name, path=es_dir,
+                       create_script=es_files[constants.ES_SCRIPT_CREATE],
+                       remove_script=es_files[constants.ES_SCRIPT_REMOVE],
+                       grow_script=es_files[constants.ES_SCRIPT_GROW],
+                       attach_script=es_files[constants.ES_SCRIPT_ATTACH],
+                       detach_script=es_files[constants.ES_SCRIPT_DETACH])
+  return True, es_obj
+
+
+def _ExtStorageEnvironment(unique_id, size=None, grow=None):
+  """Calculate the environment for an External Storage script.
+
+  @type unique_id: tuple (driver, vol_name)
+  @param unique_id: ExtStorage pool and name of the Volume
+  @type size: integer
+  @param size: size of the Volume in mebibytes
+  @rtype: dict
+  @return: dict of environment variables
+
+  """
+  vol_name = unique_id[1]
+
+  result = {}
+  result['VOL_NAME'] = vol_name
+
+  if size is not None:
+    result['VOL_SIZE'] = size
+
+  if grow is not None:
+    result['VOL_NEW_SIZE'] = grow
+
+  return result
+
+
+def _VolumeLogName(kind, es_name, volume):
+  """Compute the ExtStorage log filename for a given Volume and operation.
+
+  @type kind: string
+  @param kind: the operation type (e.g. create, remove etc.)
+  @type es_name: string
+  @param es_name: the ExtStorage name
+  @type volume: string
+  @param volume: the name of the Volume inside the External Storage
+
+  """
+  # TODO: Use tempfile.mkstemp to create unique filename
+  base = ("%s-%s-%s-%s.log" %
+          (kind, es_name, volume, utils.TimestampForFilename()))
+  return utils.PathJoin(constants.LOG_ES_DIR, base)
+
+
 DEV_MAP = {
   constants.LD_LV: LogicalVolume,
   constants.LD_DRBD8: DRBD8,
   constants.LD_BLOCKDEV: PersistentBlockDevice,
   constants.LD_RBD: RADOSBlockDevice,
+  constants.LD_EXT: ExtStorageDevice,
   }
 
 if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
diff --git a/lib/client/gnt_cluster.py b/lib/client/gnt_cluster.py
index de5bfb7bb..59e860813 100644
--- a/lib/client/gnt_cluster.py
+++ b/lib/client/gnt_cluster.py
@@ -453,6 +453,8 @@ def ShowClusterConfig(opts, args):
   ToStdout("  - primary ip version: %d", result["primary_ip_version"])
   ToStdout("  - preallocation wipe disks: %s", result["prealloc_wipe_disks"])
   ToStdout("  - OS search path: %s", utils.CommaJoin(constants.OS_SEARCH_PATH))
+  ToStdout("  - ExtStorage Providers search path: %s",
+           utils.CommaJoin(constants.ES_SEARCH_PATH))
 
   ToStdout("Default node parameters:")
   _PrintGroupedParams(result["ndparams"], roman=opts.roman_integers)
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 15a610031..320e855c7 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -8489,9 +8489,9 @@ class TLMigrateInstance(Tasklet):
     self._GoReconnect(False)
     self._WaitUntilSync()
 
-    # If the instance's disk template is `rbd' and there was a successful
-    # migration, unmap the device from the source node.
-    if self.instance.disk_template == constants.DT_RBD:
+    # If the instance's disk template is `rbd' or `ext' and there was a
+    # successful migration, unmap the device from the source node.
+    if self.instance.disk_template in (constants.DT_RBD, constants.DT_EXT):
       disks = _ExpandCheckDisks(instance, instance.disks)
       self.feedback_fn("* unmapping instance's disks from %s" % source_node)
       for disk in disks:
@@ -8740,6 +8740,7 @@ def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
 _DISK_TEMPLATE_NAME_PREFIX = {
   constants.DT_PLAIN: "",
   constants.DT_RBD: ".rbd",
+  constants.DT_EXT: ".ext",
   }
 
 
@@ -8749,6 +8750,7 @@ _DISK_TEMPLATE_DEVICE_TYPE = {
   constants.DT_SHARED_FILE: constants.LD_FILE,
   constants.DT_BLOCK: constants.LD_BLOCKDEV,
   constants.DT_RBD: constants.LD_RBD,
+  constants.DT_EXT: constants.LD_EXT,
   }
 
 
@@ -8827,6 +8829,8 @@ def _GenerateDiskTemplate(lu, template_name, instance_name, primary_node,
                                         disk[constants.IDISK_ADOPT])
   elif template_name == constants.DT_RBD:
     logical_id_fn = lambda idx, _, disk: ("rbd", names[idx])
+  elif template_name == constants.DT_EXT:
+    logical_id_fn = lambda idx, _, disk: ("ext", names[idx])
   else:
     raise errors.ProgrammerError("Unknown disk template '%s'" % template_name)
 
@@ -9097,6 +9101,7 @@ def _ComputeDiskSize(disk_template, disks):
     constants.DT_SHARED_FILE: sum(d[constants.IDISK_SIZE] for d in disks),
     constants.DT_BLOCK: 0,
     constants.DT_RBD: sum(d[constants.IDISK_SIZE] for d in disks),
+    constants.DT_EXT: sum(d[constants.IDISK_SIZE] for d in disks),
   }
 
   if disk_template not in req_size_dict:
@@ -9937,6 +9942,9 @@ class LUInstanceCreate(LogicalUnit):
       # Any function that checks prerequisites can be placed here.
       # Check if there is enough space on the RADOS cluster.
       _CheckRADOSFreeSpace()
+    elif self.op.disk_template == constants.DT_EXT:
+      # FIXME: Function that checks prereqs if needed
+      pass
     else:
       # Check lv size requirements, if not adopting
       req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
@@ -11649,7 +11657,8 @@ class LUInstanceGrowDisk(LogicalUnit):
 
     if instance.disk_template not in (constants.DT_FILE,
                                       constants.DT_SHARED_FILE,
-                                      constants.DT_RBD):
+                                      constants.DT_RBD,
+                                      constants.DT_EXT):
       # TODO: check the free disk space for file, when that feature will be
       # supported
       _CheckNodesFreeDiskPerVG(self, nodenames,
diff --git a/lib/constants.py b/lib/constants.py
index 02aa6f673..b39b4bb57 100644
--- a/lib/constants.py
+++ b/lib/constants.py
@@ -240,6 +240,7 @@ DAEMONS_LOGFILES = {
   }
 
 LOG_OS_DIR = LOG_DIR + "os"
+LOG_ES_DIR = LOG_DIR + "extstorage"
 LOG_WATCHER = LOG_DIR + "watcher.log"
 LOG_COMMANDS = LOG_DIR + "commands.log"
 LOG_BURNIN = LOG_DIR + "burnin.log"
@@ -261,6 +262,7 @@ SYSLOG_ONLY = "only"
 SYSLOG_SOCKET = "/dev/log"
 
 OS_SEARCH_PATH = _autoconf.OS_SEARCH_PATH
+ES_SEARCH_PATH = _autoconf.ES_SEARCH_PATH
 EXPORT_DIR = _autoconf.EXPORT_DIR
 
 EXPORT_CONF_FILE = "config.ini"
@@ -427,19 +429,21 @@ DT_FILE = "file"
 DT_SHARED_FILE = "sharedfile"
 DT_BLOCK = "blockdev"
 DT_RBD = "rbd"
+DT_EXT = "ext"
 
 # the set of network-mirrored disk templates
 DTS_INT_MIRROR = frozenset([DT_DRBD8])
 
 # the set of externally-mirrored disk templates (e.g. SAN, NAS)
-DTS_EXT_MIRROR = frozenset([DT_SHARED_FILE, DT_BLOCK, DT_RBD])
+DTS_EXT_MIRROR = frozenset([DT_SHARED_FILE, DT_BLOCK, DT_RBD, DT_EXT])
 
 # the set of non-lvm-based disk templates
 DTS_NOT_LVM = frozenset([DT_DISKLESS, DT_FILE, DT_SHARED_FILE,
-                         DT_BLOCK, DT_RBD])
+                         DT_BLOCK, DT_RBD, DT_EXT])
 
 # the set of disk templates which can be grown
-DTS_GROWABLE = frozenset([DT_PLAIN, DT_DRBD8, DT_FILE, DT_SHARED_FILE, DT_RBD])
+DTS_GROWABLE = frozenset([DT_PLAIN, DT_DRBD8, DT_FILE, DT_SHARED_FILE,
+                          DT_RBD, DT_EXT])
 
 # the set of disk templates that allow adoption
 DTS_MAY_ADOPT = frozenset([DT_PLAIN, DT_BLOCK])
@@ -459,15 +463,17 @@ LD_DRBD8 = "drbd8"
 LD_FILE = "file"
 LD_BLOCKDEV = "blockdev"
 LD_RBD = "rbd"
+LD_EXT = "ext"
 
 LOGICAL_DISK_TYPES = frozenset([
   LD_LV,
   LD_DRBD8,
   LD_FILE,
   LD_BLOCKDEV,
   LD_RBD,
+  LD_EXT,
   ])
 
-LDS_BLOCK = frozenset([LD_LV, LD_DRBD8, LD_BLOCKDEV, LD_RBD])
+LDS_BLOCK = frozenset([LD_LV, LD_DRBD8, LD_BLOCKDEV, LD_RBD, LD_EXT])
 
 # drbd constants
 DRBD_HMAC_ALG = "md5"
@@ -563,7 +569,8 @@ DISK_TEMPLATES = frozenset([
   DT_FILE,
   DT_SHARED_FILE,
   DT_BLOCK,
-  DT_RBD
+  DT_RBD,
+  DT_EXT
   ])
 
 FILE_DRIVER = frozenset([FD_LOOP, FD_BLKTAP])
@@ -674,6 +681,26 @@ OS_PARAMETERS_FILE = "parameters.list"
 OS_VALIDATE_PARAMETERS = "parameters"
 OS_VALIDATE_CALLS = frozenset([OS_VALIDATE_PARAMETERS])
 
+# External Storage (ES) related constants
+ES_ACTION_CREATE = "create"
+ES_ACTION_REMOVE = "remove"
+ES_ACTION_GROW = "grow"
+ES_ACTION_ATTACH = "attach"
+ES_ACTION_DETACH = "detach"
+
+ES_SCRIPT_CREATE = ES_ACTION_CREATE
+ES_SCRIPT_REMOVE = ES_ACTION_REMOVE
+ES_SCRIPT_GROW = ES_ACTION_GROW
+ES_SCRIPT_ATTACH = ES_ACTION_ATTACH
+ES_SCRIPT_DETACH = ES_ACTION_DETACH
+ES_SCRIPTS = frozenset([
+  ES_SCRIPT_CREATE,
+  ES_SCRIPT_REMOVE,
+  ES_SCRIPT_GROW,
+  ES_SCRIPT_ATTACH,
+  ES_SCRIPT_DETACH
+  ])
+
 # ssh constants
 SSH_CONFIG_DIR = _autoconf.SSH_CONFIG_DIR
 SSH_HOST_DSA_PRIV = SSH_CONFIG_DIR + "/ssh_host_dsa_key"
@@ -1871,6 +1898,8 @@ DISK_LD_DEFAULTS = {
   LD_RBD: {
     LDP_POOL: "rbd"
     },
+  LD_EXT: {
+    },
   }
 
 # readability shortcuts
@@ -1908,6 +1937,8 @@ DISK_DT_DEFAULTS = {
   DT_RBD: {
     RBD_POOL: DISK_LD_DEFAULTS[LD_RBD][LDP_POOL]
     },
+  DT_EXT: {
+    },
   }
 
 # we don't want to export the shortcuts
@@ -2067,6 +2098,7 @@ VALID_ALLOC_POLICIES = [
 
 # Temporary external/shared storage parameters
 BLOCKDEV_DRIVER_MANUAL = "manual"
+EXTSTORAGE_SAMPLE_PROVIDER = "rbd"
 
 # qemu-img path, required for ovfconverter
 QEMUIMG_PATH = _autoconf.QEMUIMG_PATH
diff --git a/lib/objects.py b/lib/objects.py
index d79e08566..c5b8ce8e5 100644
--- a/lib/objects.py
+++ b/lib/objects.py
@@ -605,7 +605,8 @@ class Disk(ConfigObject):
 
     """
     if self.dev_type in [constants.LD_LV, constants.LD_FILE,
-                         constants.LD_BLOCKDEV, constants.LD_RBD]:
+                         constants.LD_BLOCKDEV, constants.LD_RBD,
+                         constants.LD_EXT]:
       result = [node]
     elif self.dev_type in constants.LDS_DRBD:
       result = [self.logical_id[0], self.logical_id[1]]
@@ -681,7 +682,7 @@ class Disk(ConfigObject):
 
     """
     if self.dev_type in (constants.LD_LV, constants.LD_FILE,
-                         constants.LD_RBD):
+                         constants.LD_RBD, constants.LD_EXT):
       self.size += amount
     elif self.dev_type == constants.LD_DRBD8:
       if self.children:
@@ -1261,6 +1262,21 @@ class OS(ConfigObject):
     return cls.SplitNameVariant(name)[1]
 
 
+class ExtStorage(ConfigObject):
+  """Config object representing an External Storage Provider.
+
+  """
+  __slots__ = [
+    "name",
+    "path",
+    "create_script",
+    "remove_script",
+    "grow_script",
+    "attach_script",
+    "detach_script",
+    ]
+
+
 class NodeHvState(ConfigObject):
   """Hypvervisor state on a node.
 
diff --git a/tools/burnin b/tools/burnin
index bf93c72e8..fe106653e 100755
--- a/tools/burnin
+++ b/tools/burnin
@@ -462,6 +462,7 @@ class Burner(object):
       constants.DT_PLAIN,
       constants.DT_DRBD8,
       constants.DT_RBD,
+      constants.DT_EXT,
     )
     if options.disk_template not in supported_disk_templates:
       Err("Unknown disk template '%s'" % options.disk_template)
-- 
GitLab
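
For illustration only (not part of the patch itself): under this interface
an ExtStorage Provider is a directory inside ES_SEARCH_PATH (by default
/srv/ganeti/extstorage/<provider>, and currently only the `ext' provider is
looked up) containing five executable scripts named create, remove, grow,
attach and detach. The scripts receive the Volume's name and sizes through
the VOL_NAME, VOL_SIZE and VOL_NEW_SIZE environment variables set by
_ExtStorageEnvironment, and the attach script must print the mapped block
device path on stdout, which _ExtStorageAction returns and
ExtStorageDevice.Attach() then stat()s. A minimal sketch of such an attach
script, with a made-up `storage_ctl' command standing in for whatever the
real storage backend provides, could look like this:

#!/usr/bin/env python
# Illustrative sketch of /srv/ganeti/extstorage/ext/attach (hypothetical).
# Ganeti exports VOL_NAME in the environment and expects the block device
# path of the mapped volume on stdout.

import os
import subprocess
import sys


def main():
  vol_name = os.environ["VOL_NAME"]

  # Ask the (hypothetical) backend to map the volume on this node; it is
  # assumed to print the resulting device path, e.g. /dev/mapper/<vol_name>.
  process = subprocess.Popen(["storage_ctl", "map", vol_name],
                             stdout=subprocess.PIPE)
  dev_path, _ = process.communicate()
  if process.returncode != 0:
    sys.stderr.write("Mapping of volume %s failed\n" % vol_name)
    return 1

  # Ganeti reads this line and verifies it points to a block device.
  sys.stdout.write("%s\n" % dev_path.strip())
  return 0


if __name__ == "__main__":
  sys.exit(main())

With such a provider installed, the new template can then be exercised with
the usual instance commands (for example something along the lines of
"gnt-instance add -t ext ..." or tools/burnin with --disk-template=ext);
the exact invocations are only given here as an illustration and are not
defined by this patch.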