Commit c4feafe8 authored by Iustin Pop

Switch from os.path.join to utils.PathJoin

This passes a full burnin with lots of instances, and should be safe, as
we mostly join a known root (various constants) to a run-time
variable.
Signed-off-by: Iustin Pop <iustin@google.com>
Reviewed-by: Michael Hanselmann <hansmi@google.com>
parent 4bb678e9
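
As background for the commit message above: the point of utils.PathJoin over a plain os.path.join is that the joined result can be checked against the known root instead of being concatenated blindly. The sketch below is illustrative only and is not the actual lib/utils.py implementation; the helper name CheckedPathJoin and its exact checks are assumptions, but they capture the "known root + run-time variable" argument made above.

import os.path

def CheckedPathJoin(*args):
  """Join path components and refuse results that escape the first one.

  Illustrative sketch only, not Ganeti's real utils.PathJoin: the idea
  is that joining a known root with a run-time value must still yield
  a normalized absolute path under that root.
  """
  root = args[0]
  result = os.path.join(*args)
  # the joined path must still be absolute and already normalized
  # (catches "..", "." and empty components)
  if not os.path.isabs(result) or os.path.normpath(result) != result:
    raise ValueError("Invalid path after join: %r" % result)
  # ... and must still live under the known root
  # (catches absolute run-time values; simplified prefix check)
  if not result.startswith(root):
    raise ValueError("Path %r escapes its root %r" % (result, root))
  return result

With a check like this, CheckedPathJoin(constants.EXPORT_DIR, "../outside") raises instead of silently producing a path above EXPORT_DIR, which is why feeding run-time names (instance names, job ids, export names) to the joins in this diff is safe.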
@@ -85,8 +85,8 @@ def RunWatcherHooks():
   """Run the watcher hooks.

   """
-  hooks_dir = os.path.join(constants.HOOKS_BASE_DIR,
-                           constants.HOOKS_NAME_WATCHER)
+  hooks_dir = utils.PathJoin(constants.HOOKS_BASE_DIR,
+                             constants.HOOKS_NAME_WATCHER)

   try:
     results = utils.RunParts(hooks_dir)
@@ -163,7 +163,7 @@ def _CleanDirectory(path, exclude=None):
     exclude = [os.path.normpath(i) for i in exclude]

   for rel_name in utils.ListVisibleFiles(path):
-    full_name = os.path.normpath(os.path.join(path, rel_name))
+    full_name = utils.PathJoin(path, rel_name)
     if full_name in exclude:
       continue
     if os.path.isfile(full_name) and not os.path.islink(full_name):
@@ -907,8 +907,8 @@ def _GetVGInfo(vg_name):


 def _GetBlockDevSymlinkPath(instance_name, idx):
-  return os.path.join(constants.DISK_LINKS_DIR,
-                      "%s:%d" % (instance_name, idx))
+  return utils.PathJoin(constants.DISK_LINKS_DIR,
+                        "%s:%d" % (instance_name, idx))


 def _SymlinkBlockDev(instance_name, device_path, idx):
@@ -1995,7 +1995,7 @@ def ExportSnapshot(disk, dest_node, instance, cluster_name, idx, debug):
   export_env['EXPORT_DEVICE'] = real_disk.dev_path
   export_env['EXPORT_INDEX'] = str(idx)

-  destdir = os.path.join(constants.EXPORT_DIR, instance.name + ".new")
+  destdir = utils.PathJoin(constants.EXPORT_DIR, instance.name + ".new")
   destfile = disk.physical_id[1]

   # the target command is built out of three individual commands,
@@ -2035,8 +2035,8 @@ def FinalizeExport(instance, snap_disks):
   @rtype: None

   """
-  destdir = os.path.join(constants.EXPORT_DIR, instance.name + ".new")
-  finaldestdir = os.path.join(constants.EXPORT_DIR, instance.name)
+  destdir = utils.PathJoin(constants.EXPORT_DIR, instance.name + ".new")
+  finaldestdir = utils.PathJoin(constants.EXPORT_DIR, instance.name)

   config = objects.SerializableConfigParser()
@@ -2079,7 +2079,7 @@ def FinalizeExport(instance, snap_disks):
   config.set(constants.INISECT_INS, 'disk_count' , '%d' % disk_total)

-  utils.WriteFile(os.path.join(destdir, constants.EXPORT_CONF_FILE),
+  utils.WriteFile(utils.PathJoin(destdir, constants.EXPORT_CONF_FILE),
                   data=config.Dumps())

   shutil.rmtree(finaldestdir, True)
   shutil.move(destdir, finaldestdir)
@@ -2096,7 +2096,7 @@ def ExportInfo(dest):
       export info

   """
-  cff = os.path.join(dest, constants.EXPORT_CONF_FILE)
+  cff = utils.PathJoin(dest, constants.EXPORT_CONF_FILE)

   config = objects.SerializableConfigParser()
   config.read(cff)
@@ -2179,7 +2179,7 @@ def RemoveExport(export):
   @rtype: None

   """
-  target = os.path.join(constants.EXPORT_DIR, export)
+  target = utils.PathJoin(constants.EXPORT_DIR, export)

   try:
     shutil.rmtree(target)
@@ -5866,7 +5866,7 @@ class LUCreateInstance(LogicalUnit):
           self.needed_locks[locking.LEVEL_NODE].append(src_node)
         if not os.path.isabs(src_path):
           self.op.src_path = src_path = \
-            os.path.join(constants.EXPORT_DIR, src_path)
+            utils.PathJoin(constants.EXPORT_DIR, src_path)

     # On import force_variant must be True, because if we forced it at
     # initial install, our only chance when importing it back is that it
@@ -5974,8 +5974,8 @@ class LUCreateInstance(LogicalUnit):
         if src_path in exp_list[node].payload:
           found = True
           self.op.src_node = src_node = node
-          self.op.src_path = src_path = os.path.join(constants.EXPORT_DIR,
-                                                     src_path)
+          self.op.src_path = src_path = utils.PathJoin(constants.EXPORT_DIR,
+                                                       src_path)
           break

     if not found:
       raise errors.OpPrereqError("No export found for relative path %s" %
@@ -6012,7 +6012,7 @@ class LUCreateInstance(LogicalUnit):
       if export_info.has_option(constants.INISECT_INS, option):
         # FIXME: are the old os-es, disk sizes, etc. useful?
         export_name = export_info.get(constants.INISECT_INS, option)
-        image = os.path.join(src_path, export_name)
+        image = utils.PathJoin(src_path, export_name)
         disk_images.append(image)
       else:
         disk_images.append(False)
@@ -6148,9 +6148,8 @@ class LUCreateInstance(LogicalUnit):
       string_file_storage_dir = self.op.file_storage_dir

       # build the full file storage dir path
-      file_storage_dir = os.path.normpath(os.path.join(
-                                          self.cfg.GetFileStorageDir(),
-                                          string_file_storage_dir, instance))
+      file_storage_dir = utils.PathJoin(self.cfg.GetFileStorageDir(),
+                                        string_file_storage_dir, instance)

     disks = _GenerateDiskTemplate(self,
@@ -885,9 +885,9 @@ class ConfigWriter:
         # rename the file paths in logical and physical id
         file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
         disk.physical_id = disk.logical_id = (disk.logical_id[0],
-                                               os.path.join(file_storage_dir,
-                                                            inst.name,
-                                                            disk.iv_name))
+                                               utils.PathJoin(file_storage_dir,
+                                                              inst.name,
+                                                              disk.iv_name))

     self._config_data.instances[inst.name] = inst
     self._WriteConfig()
@@ -110,7 +110,7 @@ class ChrootManager(hv_base.BaseHypervisor):

     """
     return [name for name in os.listdir(self._ROOT_DIR)
-            if self._IsDirLive(os.path.join(self._ROOT_DIR, name))]
+            if self._IsDirLive(utils.PathJoin(self._ROOT_DIR, name))]

   def GetInstanceInfo(self, instance_name):
     """Get instance properties.
@@ -134,7 +134,7 @@ class ChrootManager(hv_base.BaseHypervisor):
     """
     data = []
     for file_name in os.listdir(self._ROOT_DIR):
-      path = os.path.join(self._ROOT_DIR, file_name)
+      path = utils.PathJoin(self._ROOT_DIR, file_name)
       if self._IsDirLive(path):
         data.append((file_name, 0, 0, 0, 0, 0))
     return data
@@ -882,7 +882,7 @@ class JobQueue(object):
     @return: the path to the job file

     """
-    return os.path.join(constants.QUEUE_DIR, "job-%s" % job_id)
+    return utils.PathJoin(constants.QUEUE_DIR, "job-%s" % job_id)

   @classmethod
   def _GetArchivedJobPath(cls, job_id):
@@ -895,7 +895,7 @@ class JobQueue(object):
     """
     path = "%s/job-%s" % (cls._GetArchiveDirectory(job_id), job_id)
-    return os.path.join(constants.JOB_QUEUE_ARCHIVE_DIR, path)
+    return utils.PathJoin(constants.JOB_QUEUE_ARCHIVE_DIR, path)

   @classmethod
   def _ExtractJobID(cls, name):
@@ -52,7 +52,7 @@ def GetUserFiles(user, mkdir=False):
   if not user_dir:
     raise errors.OpExecError("Cannot resolve home of user %s" % user)

-  ssh_dir = os.path.join(user_dir, ".ssh")
+  ssh_dir = utils.PathJoin(user_dir, ".ssh")
   if not os.path.lexists(ssh_dir):
     if mkdir:
       try:
@@ -63,7 +63,7 @@ def GetUserFiles(user, mkdir=False):
   elif not os.path.isdir(ssh_dir):
     raise errors.OpExecError("path ~%s/.ssh is not a directory" % user)

-  return [os.path.join(ssh_dir, base)
+  return [utils.PathJoin(ssh_dir, base)
           for base in ["id_dsa", "id_dsa.pub", "authorized_keys"]]
@@ -310,7 +310,7 @@ def RunParts(dir_name, env=None, reset_env=False):
     return rr

   for relname in sorted(dir_contents):
-    fname = os.path.join(dir_name, relname)
+    fname = PathJoin(dir_name, relname)
     if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
             constants.EXT_PLUGIN_MASK.match(relname) is not None):
       rr.append((relname, constants.RUNPARTS_SKIP, None))
@@ -1607,7 +1607,7 @@ def DaemonPidFileName(name):
       daemon name

   """
-  return os.path.join(constants.RUN_GANETI_DIR, "%s.pid" % name)
+  return PathJoin(constants.RUN_GANETI_DIR, "%s.pid" % name)


 def EnsureDaemon(name):
@@ -2106,7 +2106,7 @@ def CalculateDirectorySize(path):
   for (curpath, _, files) in os.walk(path):
     for filename in files:
-      st = os.lstat(os.path.join(curpath, filename))
+      st = os.lstat(PathJoin(curpath, filename))
       size += st.st_size

   return BytesToMebibyte(size)
@@ -23,7 +23,6 @@

 """

-import os
 import sys
 import optparse
 import time
@@ -658,7 +657,7 @@ class Burner(object):
                                         shutdown=True)
       rem_op = opcodes.OpRemoveInstance(instance_name=instance,
                                         ignore_failures=True)
-      imp_dir = os.path.join(constants.EXPORT_DIR, full_name)
+      imp_dir = utils.PathJoin(constants.EXPORT_DIR, full_name)
       imp_op = opcodes.OpCreateInstance(instance_name=instance,
                                         disks = [ {"size": size}
                                                   for size in self.disk_size],
@@ -125,7 +125,7 @@ class Merger(object):
                                  " key from %s. Fail reason: %s; output: %s" %
                                  (cluster, result.fail_reason, result.output))

-      key_path = os.path.join(self.work_dir, cluster)
+      key_path = utils.PathJoin(self.work_dir, cluster)
       utils.WriteFile(key_path, mode=0600, data=result.stdout)

       result = self._RunCmd(cluster, "gnt-node list -o name --no-header",
@@ -254,8 +254,8 @@ class Merger(object):
                                    (data.cluster, result.fail_reason,
                                     result.output))

-      data.config_path = os.path.join(self.work_dir, "%s_config.data" %
-                                      data.cluster)
+      data.config_path = utils.PathJoin(self.work_dir, "%s_config.data" %
+                                        data.cluster)
       utils.WriteFile(data.config_path, data=result.stdout)

   # R0201: Method could be a function