From bd5e77f946b4b452d1056041d5796b3949da85d2 Mon Sep 17 00:00:00 2001
From: Guido Trotter <ultrotter@google.com>
Date: Mon, 14 Jan 2008 16:01:51 +0000
Subject: [PATCH] Do instance export and import during burnin

Instances get exported to a remote node, then removed and imported back
to their original nodes. This should be an idempotent operation from the
instance's point of view, and it helps ensure ImportExport is kept up to
date. It will also make burnin take a lot longer, which is nice when you
want to take a nap: "...but I'm doing a cluster burnin...". Unfortunately
this subfeature is a bit jeopardized by the fact that the new code can be
skipped with the --no-importexport option, but nobody needs to know that,
do they?

Reviewed-by: iustinp
---
 tools/burnin | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 52 insertions(+)

diff --git a/tools/burnin b/tools/burnin
index c60e6a477..aeeb79290 100755
--- a/tools/burnin
+++ b/tools/burnin
@@ -23,6 +23,7 @@
 
 """
 
+import os
 import sys
 import optparse
 from itertools import izip, islice, cycle
@@ -121,6 +122,9 @@ class Burner(object):
     parser.add_option("--no-failover", dest="do_failover",
                       help="Skip instance failovers", action="store_false",
                       default=True)
+    parser.add_option("--no-importexport", dest="do_importexport",
+                      help="Skip instance export/import", action="store_false",
+                      default=True)
     parser.add_option("-t", "--disk-template", dest="disk_template",
                       choices=("plain", "remote_raid1", "drbd"),
                       default="remote_raid1",
@@ -258,6 +262,51 @@ class Burner(object):
       Log("- Failover instance %s" % (instance))
       self.ExecOp(op)
 
+  def ImportExport(self):
+    """Export the instance, delete it, and import it back.
+
+    """
+
+    mytor = izip(cycle(self.nodes),
+                 islice(cycle(self.nodes), 1, None),
+                 islice(cycle(self.nodes), 2, None),
+                 self.instances)
+
+    for pnode, snode, enode, instance in mytor:
+      exp_op = opcodes.OpExportInstance(instance_name=instance,
+                                        target_node=enode,
+                                        shutdown=True)
+      rem_op = opcodes.OpRemoveInstance(instance_name=instance)
+      nam_op = opcodes.OpQueryInstances(output_fields=["name"],
+                                        names=[instance])
+      full_name = self.ExecOp(nam_op)[0][0]
+      imp_dir = os.path.join(constants.EXPORT_DIR, full_name)
+      imp_op = opcodes.OpCreateInstance(instance_name=instance,
+                                        mem_size=128,
+                                        disk_size=self.opts.os_size,
+                                        swap_size=self.opts.swap_size,
+                                        disk_template=self.opts.disk_template,
+                                        mode=constants.INSTANCE_IMPORT,
+                                        src_node=enode,
+                                        src_path=imp_dir,
+                                        pnode=pnode,
+                                        snode=snode,
+                                        vcpus=1,
+                                        start=True,
+                                        ip_check=True,
+                                        wait_for_sync=True,
+                                        mac="auto")
+
+      Log("- Export instance %s to node %s" % (instance, enode))
+      self.ExecOp(exp_op)
+      Log("- Remove instance %s" % (instance))
+      self.ExecOp(rem_op)
+      self.to_rem.remove(instance)
+      Log("- Import instance %s from node %s to node %s" %
+          (instance, enode, pnode))
+      self.ExecOp(imp_op)
+      self.to_rem.append(instance)
+
   def StopStart(self):
     """Stop/start the instances."""
     for instance in self.instances:
@@ -307,6 +356,9 @@ class Burner(object):
       if opts.do_failover and opts.disk_template in constants.DTS_NET_MIRROR:
         self.Failover()
 
+      if opts.do_importexport:
+        self.ImportExport()
+
       self.StopStart()
 
       has_err = False
     finally:
-- 
GitLab
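
Editor's note: a short illustrative sketch (not part of the patch) of the
izip/cycle/islice pattern that ImportExport uses to pick a primary node,
secondary node and export node for each instance. The node and instance
names below are made up, and Python 3's built-in zip stands in for the
Python 2 itertools.izip that burnin imports:

    from itertools import cycle, islice

    nodes = ["node1", "node2", "node3"]               # hypothetical cluster nodes
    instances = ["inst1", "inst2", "inst3", "inst4"]  # hypothetical instances

    # Each instance gets three consecutive nodes from the node ring:
    # primary, secondary, and the node the export is written to.
    # zip() stops as soon as the finite 'instances' iterable runs out,
    # even though the cycle() iterators are infinite.
    mytor = zip(cycle(nodes),                   # primary node
                islice(cycle(nodes), 1, None),  # secondary node (next in ring)
                islice(cycle(nodes), 2, None),  # export node (next again)
                instances)

    for pnode, snode, enode, instance in mytor:
        print("%s: pnode=%s snode=%s export=%s" % (instance, pnode, snode, enode))
    # inst1: pnode=node1 snode=node2 export=node3
    # inst2: pnode=node2 snode=node3 export=node1
    # inst3: pnode=node3 snode=node1 export=node2
    # inst4: pnode=node1 snode=node2 export=node3

The rotation explains why the commit message calls the export/import cycle
idempotent for the instance: the instance is recreated on its original
primary/secondary pair, with only the export written to a third node.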