Commit 858f3d18 authored by Iustin Pop

Add disk copy support at backend and the rpc level



This uses a simple 'dd if=… | ssh $target dd of=…' method, similar to
ExportSnapshot (which goes through the OS export script; here we want a
full disk-level copy, not any FS-level changes).
Signed-off-by: Iustin Pop <iustin@google.com>
Reviewed-by: Michael Hanselmann <hansmi@google.com>
parent 4b5e8271
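
For illustration only, a minimal standalone sketch of the pipeline shape described in the message above, using plain subprocess/shlex instead of Ganeti's utils helpers and SSH runner; the device paths, host name, run-as user and size are hypothetical:

import shlex
import subprocess

src_dev = "/dev/xenvg/inst1.disk0"    # hypothetical source block device
dst_node = "node2.example.com"        # hypothetical destination node
dst_dev = "/dev/xenvg/inst1.copy0"    # hypothetical target, must already exist (nocreat)
size_mib = 1024                       # disk size in MiB

# reader: 1MiB blocks so that count equals the size in MiB
read_cmd = ("set -e; set -o pipefail; dd if=%s bs=1048576 count=%d"
            % (shlex.quote(src_dev), size_mib))
# writer on the remote node: small blocks, no create/truncate, synced output
write_cmd = ("dd of=%s conv=nocreat,notrunc bs=65536 oflag=dsync"
             % shlex.quote(dst_dev))
# the real code builds this via _GetSshRunner(...).BuildCmd and runs as
# constants.GANETI_RUNAS; "root" here is an assumption for the sketch
ssh_cmd = "ssh root@%s %s" % (dst_node, shlex.quote(write_cmd))

result = subprocess.run(["bash", "-c", read_cmd + "|" + ssh_cmd])
if result.returncode != 0:
    raise RuntimeError("disk copy failed (exit code %d)" % result.returncode)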
@@ -256,6 +256,15 @@ class NodeHttpServer(http.server.HttpServer):
    disks = [objects.Disk.FromDict(cf) for cf in params[0]]
    return backend.BlockdevGetsize(disks)

  @staticmethod
  def perspective_blockdev_export(params):
    """Export a given block device to a remote node.

    """
    disk = objects.Disk.FromDict(params[0])
    dest_node, dest_path, cluster_name = params[1:]
    return backend.BlockdevExport(disk, dest_node, dest_path, cluster_name)

  # blockdev/drbd specific methods ----------

  @staticmethod
......
@@ -1459,6 +1459,54 @@ def BlockdevGetsize(disks):
  return result


def BlockdevExport(disk, dest_node, dest_path, cluster_name):
  """Export a block device to a remote node.

  @type disk: L{objects.Disk}
  @param disk: the description of the disk to export
  @type dest_node: str
  @param dest_node: the destination node to export to
  @type dest_path: str
  @param dest_path: the destination path on the target node
  @type cluster_name: str
  @param cluster_name: the cluster name, needed for SSH hostalias
  @rtype: None

  """
  real_disk = _RecursiveFindBD(disk)
  if real_disk is None:
    _Fail("Block device '%s' is not set up", disk)

  real_disk.Open()

  # the block size for the read dd is 1MiB to match our units
  expcmd = utils.BuildShellCmd("set -e; set -o pipefail; "
                               "dd if=%s bs=1048576 count=%s",
                               real_disk.dev_path, str(disk.size))

  # we use a smaller block size here because, due to ssh buffering,
  # anything above 64-128k will be mostly ignored; we use nocreat to
  # fail if the target device is not already there or we passed a wrong
  # path; we use notrunc to avoid attempting to truncate an LV device;
  # we use oflag=dsync to avoid buffering too much memory; this means
  # that at best we flush every 64k, which will not be very fast
  destcmd = utils.BuildShellCmd("dd of=%s conv=nocreat,notrunc bs=65536"
                                " oflag=dsync", dest_path)

  remotecmd = _GetSshRunner(cluster_name).BuildCmd(dest_node,
                                                   constants.GANETI_RUNAS,
                                                   destcmd)

  # all commands have been checked, so we're safe to combine them
  command = '|'.join([expcmd, utils.ShellQuoteArgs(remotecmd)])

  result = utils.RunCmd(["bash", "-c", command])

  if result.failed:
    _Fail("Disk copy command '%s' returned error: %s"
          " output: %s", command, result.fail_reason, result.output)


def UploadFile(file_name, data, mode, uid, gid, atime, mtime):
  """Write a file to the filesystem.
......
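
A note on the "set -e; set -o pipefail" prefix in expcmd: without pipefail, bash reports only the exit status of the last stage of a pipeline, so a failing source dd would look like success to utils.RunCmd. A quick standalone illustration with plain subprocess:

import subprocess

# Without pipefail, the pipeline's status is that of its last command,
# so a failure in the reading dd would be invisible to the caller.
loose = subprocess.run(["bash", "-c", "false | cat"])
print(loose.returncode)    # 0: the failure of "false" is lost

# With pipefail, as used by BlockdevExport, any failing stage fails the pipe.
strict = subprocess.run(["bash", "-c", "set -o pipefail; false | cat"])
print(strict.returncode)   # 1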
@@ -961,6 +961,17 @@ class RpcRunner(object):
    return self._SingleNodeCall(node, "blockdev_grow",
                                [cf_bdev.ToDict(), amount])

  def call_blockdev_export(self, node, cf_bdev,
                           dest_node, dest_path, cluster_name):
    """Export a given disk to another node.

    This is a single-node call.

    """
    return self._SingleNodeCall(node, "blockdev_export",
                                [cf_bdev.ToDict(), dest_node, dest_path,
                                 cluster_name])

  def call_blockdev_snapshot(self, node, cf_bdev):
    """Request a snapshot of the given block device.
......
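
For illustration, a rough sketch of the payload that call_blockdev_export puts on the wire and how perspective_blockdev_export unpacks it; the disk dictionary below is a hypothetical stand-in for objects.Disk.ToDict(), whose real contents depend on the disk type:

# hypothetical serialized disk; a real Disk.ToDict() carries the full
# definition (dev_type, logical_id, size, children, ...)
disk_dict = {"dev_type": "lvm", "size": 1024}

# what _SingleNodeCall(node, "blockdev_export", ...) sends as params
params = [disk_dict, "node2.example.com",
          "/dev/xenvg/inst1.copy0", "cluster.example.com"]

# server side (see perspective_blockdev_export above), minus the
# objects.Disk.FromDict() deserialization of params[0]
disk_data = params[0]
dest_node, dest_path, cluster_name = params[1:]
print(disk_data, dest_node, dest_path, cluster_name)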