Commit 580c971e authored by Michael Hanselmann's avatar Michael Hanselmann
Browse files

Merge branch 'devel-2.4'



* devel-2.4: (24 commits)
  mlock: fail gracefully if libc.so.6 cannot be loaded
  Allow creating the DRBD metadev in a different VG
  Make _GenerateDRBD8Branch accept different VG names
  Fix WriteFile with unicode data
  Replace disks: keep the meta device in the same VG
  Fix for multiple VGs - PlainToDrbd and replace-disks
  Fix potential data-loss in utils.WriteFile
  Improve error messages in cluster verify/OS
  Prevent readding of the master node
  Fix punctuation in an error message
  cli: Fix wrong argument kind for groups
  Quote filename in gnt-instance.8
  Fix typo in LUGroupAssignNodes
  gnt-instance info: automatically request locking
  Document the dependency on OOB for gnt-node power
  Fix master IP activation in failover with no-voting
  disk wiping: fix bug in chunk size computation
  Fix bug in watcher
  Release locks before wiping disks during instance creation
  utils.WriteFile: Close file before renaming
  ...

Conflicts:
	lib/cmdlib.py: Disk parameter constants
	man/gnt-instance.rst: Trivial
	man/gnt-node.rst: Trivial
Signed-off-by: Michael Hanselmann <hansmi@google.com>
Reviewed-by: Iustin Pop <iustin@google.com>
parents 661515f6 adc523ab
......@@ -979,6 +979,11 @@ check-local: check-dirs
$(CHECK_PYTHON_CODE) $(check_python_code)
$(CHECK_VERSION) $(VERSION) $(top_srcdir)/NEWS
$(CHECK_NEWS) < $(top_srcdir)/NEWS
expver=$(VERSION_MAJOR).$(VERSION_MINOR); \
if test "`head -n 1 $(top_srcdir)/README`" != "Ganeti $$expver"; then \
echo "Incorrect version in README, expected $$expver"; \
exit 1; \
fi
.PHONY: hs-check
hs-check: htools/test
......
Ganeti 2.2
Ganeti 2.4
==========
For installation instructions, read the INSTALL and the doc/install.html
......
......@@ -345,7 +345,8 @@ ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgInstance(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
......
......@@ -1129,7 +1129,8 @@ def ShowInstanceConfig(opts, args):
return 1
retcode = 0
op = opcodes.OpInstanceQueryData(instances=args, static=opts.static)
op = opcodes.OpInstanceQueryData(instances=args, static=opts.static,
use_locking=not opts.static)
result = SubmitOpCode(op, opts=opts)
if not result:
ToStdout("No instances.")
......
......@@ -172,9 +172,9 @@ def AddNode(opts, args):
readd = opts.readd
try:
output = cl.QueryNodes(names=[node], fields=['name', 'sip'],
output = cl.QueryNodes(names=[node], fields=['name', 'sip', 'master'],
use_locking=False)
node_exists, sip = output[0]
node_exists, sip, is_master = output[0]
except (errors.OpPrereqError, errors.OpExecError):
node_exists = ""
sip = None
......@@ -184,6 +184,9 @@ def AddNode(opts, args):
ToStderr("Node %s not in the cluster"
" - please retry without '--readd'", node)
return 1
if is_master:
ToStderr("Node %s is the master, cannot readd", node)
return 1
else:
if node_exists:
ToStderr("Node %s already in the cluster (as %s)"
......
......@@ -1891,6 +1891,7 @@ class LUClusterVerify(LogicalUnit):
assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"
beautify_params = lambda l: ["%s: %s" % (k, v) for (k, v) in l]
for os_name, os_data in nimg.oslist.items():
assert os_data, "Empty OS status for OS %s?!" % os_name
f_path, f_status, f_diag, f_var, f_param, f_api = os_data[0]
......@@ -1918,11 +1919,12 @@ class LUClusterVerify(LogicalUnit):
continue
for kind, a, b in [("API version", f_api, b_api),
("variants list", f_var, b_var),
("parameters", f_param, b_param)]:
("parameters", beautify_params(f_param),
beautify_params(b_param))]:
_ErrorIf(a != b, self.ENODEOS, node,
"OS %s %s differs from reference node %s: %s vs. %s",
"OS %s for %s differs from reference node %s: [%s] vs. [%s]",
kind, os_name, base.name,
utils.CommaJoin(a), utils.CommaJoin(b))
utils.CommaJoin(sorted(a)), utils.CommaJoin(sorted(b)))
# check any missing OSes
missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
......@@ -4255,6 +4257,11 @@ class LUNodeAdd(LogicalUnit):
self.hostname = netutils.GetHostname(name=self.op.node_name,
family=self.primary_ip_family)
self.op.node_name = self.hostname.name
if self.op.readd and self.op.node_name == self.cfg.GetMasterNode():
raise errors.OpPrereqError("Cannot readd the master node",
errors.ECODE_STATE)
if self.op.readd and self.op.group:
raise errors.OpPrereqError("Cannot pass a node group when a node is"
" being readded", errors.ECODE_INVAL)
......@@ -4490,7 +4497,7 @@ class LUNodeAdd(LogicalUnit):
feedback_fn("ssh/hostname verification failed"
" (checking from %s): %s" %
(verifier, nl_payload[failed]))
raise errors.OpExecError("ssh/hostname verification failed.")
raise errors.OpExecError("ssh/hostname verification failed")
if self.op.readd:
_RedistributeAncillaryFiles(self)
......@@ -6981,17 +6988,18 @@ def _GenerateUniqueNames(lu, exts):
return results
def _GenerateDRBD8Branch(lu, primary, secondary, size, vgname, names, iv_name,
p_minor, s_minor):
def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
iv_name, p_minor, s_minor):
"""Generate a drbd8 device complete with its children.
"""
assert len(vgnames) == len(names) == 2
port = lu.cfg.AllocatePort()
shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
logical_id=(vgname, names[0]))
logical_id=(vgnames[0], names[0]))
dev_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
logical_id=(vgname, names[1]))
logical_id=(vgnames[1], names[1]))
drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
logical_id=(primary, secondary, port,
p_minor, s_minor,
......@@ -7046,9 +7054,11 @@ def _GenerateDiskTemplate(lu, template_name,
names.append(lv_prefix + "_meta")
for idx, disk in enumerate(disk_info):
disk_index = idx + base_index
vg = disk.get(constants.IDISK_VG, vgname)
data_vg = disk.get(constants.IDISK_VG, vgname)
meta_vg = disk.get(constants.IDISK_METAVG, data_vg)
disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
disk[constants.IDISK_SIZE], vg,
disk[constants.IDISK_SIZE],
[data_vg, meta_vg],
names[idx * 2:idx * 2 + 2],
"disk/%d" % disk_index,
minors[idx * 2], minors[idx * 2 + 1])
......@@ -7150,14 +7160,17 @@ def _WipeDisks(lu, instance):
try:
for idx, device in enumerate(instance.disks):
lu.LogInfo("* Wiping disk %d", idx)
logging.info("Wiping disk %d for instance %s, node %s",
idx, instance.name, node)
# The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
# MAX_WIPE_CHUNK at max
wipe_chunk_size = min(constants.MAX_WIPE_CHUNK, device.size / 100.0 *
constants.MIN_WIPE_CHUNK_PERCENT)
# we _must_ make this an int, otherwise rounding errors will
# occur
wipe_chunk_size = int(wipe_chunk_size)
lu.LogInfo("* Wiping disk %d", idx)
logging.info("Wiping disk %d for instance %s, node %s using"
" chunk size %s", idx, instance.name, node, wipe_chunk_size)
offset = 0
size = device.size
......@@ -7166,6 +7179,8 @@ def _WipeDisks(lu, instance):
while offset < size:
wipe_size = min(wipe_chunk_size, size - offset)
logging.debug("Wiping disk %d, offset %s, chunk %s",
idx, offset, wipe_size)
result = lu.rpc.call_blockdev_wipe(node, device, offset, wipe_size)
result.Raise("Could not wipe disk %d at offset %d for size %d" %
(idx, offset, wipe_size))
......@@ -7964,10 +7979,13 @@ class LUInstanceCreate(LogicalUnit):
except (TypeError, ValueError):
raise errors.OpPrereqError("Invalid disk size '%s'" % size,
errors.ECODE_INVAL)
data_vg = disk.get(constants.IDISK_VG, default_vg)
new_disk = {
constants.IDISK_SIZE: size,
constants.IDISK_MODE: mode,
constants.IDISK_VG: disk.get(constants.IDISK_VG, default_vg),
constants.IDISK_VG: data_vg,
constants.IDISK_METAVG: disk.get(constants.IDISK_METAVG, data_vg),
}
if constants.IDISK_ADOPT in disk:
new_disk[constants.IDISK_ADOPT] = disk[constants.IDISK_ADOPT]
......@@ -8236,18 +8254,6 @@ class LUInstanceCreate(LogicalUnit):
self.cfg.ReleaseDRBDMinors(instance)
raise
if self.cfg.GetClusterInfo().prealloc_wipe_disks:
feedback_fn("* wiping instance disks...")
try:
_WipeDisks(self, iobj)
except errors.OpExecError:
self.LogWarning("Device wiping failed, reverting...")
try:
_RemoveDisks(self, iobj)
finally:
self.cfg.ReleaseDRBDMinors(instance)
raise
feedback_fn("adding instance %s to cluster config" % instance)
self.cfg.AddInstance(iobj, self.proc.GetECId())
......@@ -8266,7 +8272,20 @@ class LUInstanceCreate(LogicalUnit):
self.context.glm.release(locking.LEVEL_NODE)
del self.acquired_locks[locking.LEVEL_NODE]
if self.op.wait_for_sync:
disk_abort = False
if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
feedback_fn("* wiping instance disks...")
try:
_WipeDisks(self, iobj)
except errors.OpExecError, err:
logging.exception("Wiping disks failed")
self.LogWarning("Wiping instance disks failed (%s)", err)
disk_abort = True
if disk_abort:
# Something is already wrong with the disks, don't do anything else
pass
elif self.op.wait_for_sync:
disk_abort = not _WaitForSync(self, iobj)
elif iobj.disk_template in constants.DTS_INT_MIRROR:
# make sure the disks are not degraded (still sync-ing is ok)
......@@ -8852,7 +8871,6 @@ class TLReplaceDisks(Tasklet):
(node_name, self.instance.name))
def _CreateNewStorage(self, node_name):
vgname = self.cfg.GetVGName()
iv_names = {}
for idx, dev in enumerate(self.instance.disks):
......@@ -8866,10 +8884,12 @@ class TLReplaceDisks(Tasklet):
lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
names = _GenerateUniqueNames(self.lu, lv_names)
vg_data = dev.children[0].logical_id[0]
lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
logical_id=(vgname, names[0]))
logical_id=(vg_data, names[0]))
vg_meta = dev.children[1].logical_id[0]
lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
logical_id=(vgname, names[1]))
logical_id=(vg_meta, names[1]))
new_lvs = [lv_data, lv_meta]
old_lvs = dev.children
......@@ -10025,7 +10045,8 @@ class LUInstanceSetParams(LogicalUnit):
snode = self.op.remote_node
# create a fake disk info for _GenerateDiskTemplate
disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode}
disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
constants.IDISK_VG: d.logical_id[0]}
for d in instance.disks]
new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
instance.name, pnode, [snode],
......@@ -10789,7 +10810,7 @@ class LUGroupAssignNodes(NoHooksLU):
if previous_splits:
self.LogWarning("In addition, these already-split instances continue"
" to be spit across groups: %s",
" to be split across groups: %s",
utils.CommaJoin(utils.NiceSort(previous_splits)))
def Exec(self, feedback_fn):
......@@ -10878,7 +10899,8 @@ class _GroupQuery(_QueryBase):
missing.append(name)
if missing:
raise errors.OpPrereqError("Some groups do not exist: %s" % missing,
raise errors.OpPrereqError("Some groups do not exist: %s" %
utils.CommaJoin(missing),
errors.ECODE_NOENT)
def DeclareLocks(self, lu, level):
......
......@@ -753,11 +753,13 @@ IDISK_SIZE = "size"
IDISK_MODE = "mode"
IDISK_ADOPT = "adopt"
IDISK_VG = "vg"
IDISK_METAVG = "metavg"
IDISK_PARAMS_TYPES = {
IDISK_SIZE: VTYPE_SIZE,
IDISK_MODE: VTYPE_STRING,
IDISK_ADOPT: VTYPE_STRING,
IDISK_VG: VTYPE_STRING,
IDISK_METAVG: VTYPE_STRING,
}
IDISK_PARAMS = frozenset(IDISK_PARAMS_TYPES.keys())
......
......@@ -573,26 +573,24 @@ def CheckMasterd(options, args):
# If CheckMaster didn't fail we believe we are the master, but we have to
# confirm with the other nodes.
if options.no_voting:
if options.yes_do_it:
return
if not options.yes_do_it:
sys.stdout.write("The 'no voting' option has been selected.\n")
sys.stdout.write("This is dangerous, please confirm by"
" typing uppercase 'yes': ")
sys.stdout.flush()
sys.stdout.write("The 'no voting' option has been selected.\n")
sys.stdout.write("This is dangerous, please confirm by"
" typing uppercase 'yes': ")
sys.stdout.flush()
confirmation = sys.stdin.readline().strip()
if confirmation != "YES":
print >> sys.stderr, "Aborting."
sys.exit(constants.EXIT_FAILURE)
confirmation = sys.stdin.readline().strip()
if confirmation != "YES":
print >> sys.stderr, "Aborting."
else:
# CheckAgreement uses RPC and threads, hence it needs to be run in
# a separate process before we call utils.Daemonize in the current
# process.
if not utils.RunInSeparateProcess(CheckAgreement):
sys.exit(constants.EXIT_FAILURE)
return
# CheckAgreement uses RPC and threads, hence it needs to be run in a separate
# process before we call utils.Daemonize in the current process.
if not utils.RunInSeparateProcess(CheckAgreement):
sys.exit(constants.EXIT_FAILURE)
# ActivateMasterIP also uses RPC/threads, so we run it again via a
# separate process.
......
......@@ -117,36 +117,55 @@ def WriteFile(file_name, fn=None, data=None,
if backup and not dry_run and os.path.isfile(file_name):
CreateBackup(file_name)
dir_name, base_name = os.path.split(file_name)
fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
# Whether temporary file needs to be removed (e.g. if any error occurs)
do_remove = True
# here we need to make sure we remove the temp file, if any error
# leaves it in place
# Function result
result = None
(dir_name, base_name) = os.path.split(file_name)
(fd, new_name) = tempfile.mkstemp(suffix=".new", prefix=base_name,
dir=dir_name)
try:
if uid != -1 or gid != -1:
os.chown(new_name, uid, gid)
if mode:
os.chmod(new_name, mode)
if callable(prewrite):
prewrite(fd)
if data is not None:
os.write(fd, data)
else:
fn(fd)
if callable(postwrite):
postwrite(fd)
os.fsync(fd)
if atime is not None and mtime is not None:
os.utime(new_name, (atime, mtime))
try:
if uid != -1 or gid != -1:
os.chown(new_name, uid, gid)
if mode:
os.chmod(new_name, mode)
if callable(prewrite):
prewrite(fd)
if data is not None:
if isinstance(data, unicode):
data = data.encode()
assert isinstance(data, str)
to_write = len(data)
offset = 0
while offset < to_write:
written = os.write(fd, buffer(data, offset))
assert written >= 0
assert written <= to_write - offset
offset += written
assert offset == to_write
else:
fn(fd)
if callable(postwrite):
postwrite(fd)
os.fsync(fd)
if atime is not None and mtime is not None:
os.utime(new_name, (atime, mtime))
finally:
# Close file unless the file descriptor should be returned
if close:
os.close(fd)
else:
result = fd
# Rename file to destination name
if not dry_run:
os.rename(new_name, file_name)
# Successful, no need to remove anymore
do_remove = False
finally:
if close:
os.close(fd)
result = None
else:
result = fd
if do_remove:
RemoveFile(new_name)
......
......@@ -51,7 +51,11 @@ def Mlockall(_ctypes=ctypes):
if _ctypes is None:
raise errors.NoCtypesError()
libc = _ctypes.cdll.LoadLibrary("libc.so.6")
try:
libc = _ctypes.cdll.LoadLibrary("libc.so.6")
except EnvironmentError, err:
logging.error("Failure trying to load libc: %s", err)
libc = None
if libc is None:
logging.error("Cannot set memory lock, ctypes cannot load libc")
return
......
......@@ -71,7 +71,7 @@ KEY_RESTART_WHEN = "restart_when"
KEY_BOOT_ID = "bootid"
# Global client object
# Global LUXI client object
client = None
......@@ -107,8 +107,9 @@ def RunWatcherHooks():
try:
results = utils.RunParts(hooks_dir)
except Exception, msg: # pylint: disable-msg=W0703
logging.critical("RunParts %s failed: %s", hooks_dir, msg)
except Exception: # pylint: disable-msg=W0703
logging.exception("RunParts %s failed: %s", hooks_dir)
return
for (relname, status, runresult) in results:
if status == constants.RUNPARTS_SKIP:
......
......@@ -28,7 +28,7 @@ ADD
| **add**
| {-t {diskless | file \| plain \| drbd}}
| {--disk=*N*: {size=*VAL* \| adopt=*LV*}[,vg=*VG*][,mode=*ro\|rw*]
| {--disk=*N*: {size=*VAL* \| adopt=*LV*}[,vg=*VG*][,metavg=*VG*][,mode=*ro\|rw*]
| \| -s *SIZE*}
| [--no-ip-check] [--no-name-check] [--no-start] [--no-install]
| [--net=*N* [:options...] \| --no-nics]
......@@ -49,10 +49,12 @@ The ``disk`` option specifies the parameters for the disks of the
instance. The numbering of disks starts at zero, and at least one disk
needs to be passed. For each disk, either the size or the adoption
source needs to be given, and optionally the access mode (read-only or
the default of read-write) and LVM volume group can also be specified.
The size is interpreted (when no unit is given) in mebibytes. You can
also use one of the suffixes *m*, *g* or *t* to specify the exact the
units used; these suffixes map to mebibytes, gibibytes and tebibytes.
the default of read-write) and the LVM volume group can also be
specified (via the ``vg`` key). For DRBD devices, a different VG can
be specified for the metadata device using the ``metavg`` key. The
size is interpreted (when no unit is given) in mebibytes. You can also
use one of the suffixes *m*, *g* or *t* to specify the exact units
used; these suffixes map to mebibytes, gibibytes and tebibytes.
When using the ``adopt`` key in the disk definition, Ganeti will
reuse those volumes (instead of creating new ones) as the
......@@ -675,7 +677,7 @@ The default output field list is: ``name``, ``os``, ``pnode``,
LIST-FIELDS
~~~~~~~~~~
~~~~~~~~~~~
**list-fields** [field...]
......@@ -709,7 +711,7 @@ MODIFY
| [-H *HYPERVISOR\_PARAMETERS*]
| [-B *BACKEND\_PARAMETERS*]
| [--net add*[:options]* \| --net remove \| --net *N:options*]
| [--disk add:size=*SIZE*[,vg=*VG*] \| --disk remove \|
| [--disk add:size=*SIZE*[,vg=*VG*][,metavg=*VG*] \| --disk remove \|
| --disk *N*:mode=*MODE*]
| [-t plain | -t drbd -n *new_secondary*]
| [--os-type=*OS* [--force-variant]]
......@@ -733,10 +735,12 @@ conversion. When changing from the plain to the drbd disk template, a
new secondary node must be specified via the ``-n`` option.
The ``--disk add:size=``*SIZE* option adds a disk to the instance. The
optional ``vg=``*VG* option specifies LVM volume group other than default
vg to create disk on. The ``--disk remove`` option will remove the last
disk of the instance. The ``--disk`` *N*``:mode=``*MODE* option will change
the mode of the Nth disk of the instance between read-only (``ro``) and
optional ``vg=``*VG* option specifies LVM volume group other than
default vg to create the disk on. For DRBD disks, the ``metavg=``*VG*
option specifies the volume group for the metadata device. The
``--disk remove`` option will remove the last disk of the
instance. The ``--disk`` *N*``:mode=``*MODE* option will change the
mode of the Nth disk of the instance between read-only (``ro``) and
read-write (``rw``).
The ``--net add:``*options* option will add a new NIC to the
......@@ -1325,7 +1329,7 @@ characters, the entire operation will abort.
If the ``--from`` option is given, the list of tags will be
extended with the contents of that file (each line becomes a tag).
In this case, there is no need to pass tags on the command line
(if you do, both sources will be used). A file name of - will be
(if you do, both sources will be used). A file name of ``-`` will be
interpreted as stdin.
LIST-TAGS
......@@ -1346,8 +1350,8 @@ existing on the node, the entire operation will abort.
If the ``--from`` option is given, the list of tags to be removed will
be extended with the contents of that file (each line becomes a tag).
In this case, there is no need to pass tags on the command line (if
you do, tags from both sources will be removed). A file name of - will
be interpreted as stdin.
you do, tags from both sources will be removed). A file name of ``-``
will be interpreted as stdin.
.. vim: set textwidth=72 :
.. Local Variables:
......
......@@ -498,8 +498,12 @@ POWER
[``--power-delay``] on|off|cycle|status [*nodes*]
This commands calls out to out-of-band management to change the power
state of given node. With ``status`` you get the power status as
reported by the out-of-band management script.
state of the given node. With ``status`` you get the power status as reported
by the out-of-band management script.
Note that this command will only work if the out-of-band functionality
is configured and enabled on the cluster. If this is not the case,
please use the **powercycle** command above.
Using ``--force`` you skip the confirmation to do the operation.
Currently this only has effect on ``off`` and ``cycle``. On those two
......
......@@ -234,11 +234,16 @@ class TestListVisibleFiles(unittest.TestCase):
class TestWriteFile(unittest.TestCase):
def setUp(self):
self.tmpdir = None
self.tfile = tempfile.NamedTemporaryFile()
self.did_pre = False
self.did_post = False
self.did_write = False
def tearDown(self):
if self.tmpdir:
shutil.rmtree(self.tmpdir)
def markPre(self, fd):
self.did_pre = True
......@@ -253,6 +258,11 @@ class TestWriteFile(unittest.TestCase):
utils.WriteFile(self.tfile.name, data=data)
self.assertEqual(utils.ReadFile(self.tfile.name), data)
def testWriteSimpleUnicode(self):
data = u"abc"
utils.WriteFile(self.tfile.name, data=data)
self.assertEqual(utils.ReadFile(self.tfile.name), data)
def testErrors(self):
self.assertRaises(errors.ProgrammerError, utils.WriteFile,
self.tfile.name, data="test", fn=lambda fd: None)
......@@ -260,11 +270,22 @@ class TestWriteFile(unittest.TestCase):
self.assertRaises(errors.ProgrammerError, utils.WriteFile,
self.tfile.name, data="test", atime=0)
def testCalls(self):
utils.WriteFile(self.tfile.name, fn=self.markWrite,
prewrite=self.markPre, postwrite=self.markPost)
def testPreWrite(self):
utils.WriteFile(self.tfile.name, data="", prewrite=self.markPre)
self.assertTrue(self.did_pre)
self.assertFalse(self.did_post)
self.assertFalse(self.did_write)
def testPostWrite(self):
utils.WriteFile(self.tfile.name, data="", postwrite=self.markPost)
self.assertFalse(self.did_pre)
self.assertTrue(self.did_post)
self.assertFalse(self.did_write)
def testWriteFunction(self):
utils.WriteFile(self.tfile.name, fn=self.markWrite)
self.assertFalse(self.did_pre)
self.assertFalse(self.did_post)
self.assertTrue(self.did_write)
def testDryRun(self):
......@@ -293,6 +314,57 @@ class TestWriteFile(unittest.TestCase):
finally:
os.close(fd)
def testNoLeftovers(self):
self.tmpdir = tempfile.mkdtemp()
self.assertEqual(utils.WriteFile(utils.PathJoin(self.tmpdir, "test"),
data="abc"),
None)
self.assertEqual(os.listdir(self.tmpdir), ["test"])
def testFailRename(self):
self.tmpdir = tempfile.mkdtemp()
target = utils.PathJoin(self.tmpdir, "target")
os.mkdir(target)
self.assertRaises(OSError, utils.WriteFile, target, data="abc")