diff --git a/daemons/ganeti-masterd b/daemons/ganeti-masterd
index 9b0851fe1b43223606e5ae1d80d0404bd781058a..3a77be41c38159d7a3d88c14ded349e03d11c711 100755
--- a/daemons/ganeti-masterd
+++ b/daemons/ganeti-masterd
@@ -246,7 +246,7 @@ class ClientOps:
     elif method == luxi.REQ_QUERY_JOBS:
       (job_ids, fields) = args
       if isinstance(job_ids, (tuple, list)) and job_ids:
-        msg = ", ".join(job_ids)
+        msg = utils.CommaJoin(job_ids)
       else:
         msg = str(job_ids)
       logging.info("Received job query request for %s", msg)
diff --git a/daemons/ganeti-watcher b/daemons/ganeti-watcher
index c09e7a475f4031a915873dfd60221a35fd8c9456..20350a06afe399d4bc64f0586ffc29087cb887df 100755
--- a/daemons/ganeti-watcher
+++ b/daemons/ganeti-watcher
@@ -430,7 +430,7 @@ class Watcher(object):
       # nothing to do
       return
     logging.debug("Will activate disks for instances %s",
-                  ", ".join(offline_disk_instances))
+                  utils.CommaJoin(offline_disk_instances))
     # we submit only one job, and wait for it. not optimal, but spams
     # less the job queue
     job = [opcodes.OpActivateInstanceDisks(instance_name=name)
diff --git a/lib/backend.py b/lib/backend.py
index 0a50100b390b55e766926a965f7c041dc5d86d04..f5f258bd25c1cb6439b4d343015420beed28b716 100644
--- a/lib/backend.py
+++ b/lib/backend.py
@@ -666,7 +666,7 @@ def BridgesExist(bridges_list):
       missing.append(bridge)

   if missing:
-    _Fail("Missing bridges %s", ", ".join(missing))
+    _Fail("Missing bridges %s", utils.CommaJoin(missing))


 def GetInstanceList(hypervisor_list):
diff --git a/lib/cli.py b/lib/cli.py
index 5e811b5f15df929a76e7b0def24062d9a51e96c2..752546b172129664c5e863d6b98ad82254c5c7f3 100644
--- a/lib/cli.py
+++ b/lib/cli.py
@@ -1666,7 +1666,7 @@ def GetOnlineNodes(nodes, cl=None, nowarn=False):
                          use_locking=False)
   offline = [row[0] for row in result if row[1]]
   if offline and not nowarn:
-    ToStderr("Note: skipping offline node(s): %s" % ", ".join(offline))
+    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
   return [row[0] for row in result if not row[1]]


@@ -1758,7 +1758,7 @@ class JobExecutor(object):
     if self.verbose:
       ok_jobs = [row[1] for row in self.jobs if row[0]]
       if ok_jobs:
-        ToStdout("Submitted jobs %s", ", ".join(ok_jobs))
+        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
     for submit_status, jid, name in self.jobs:
       if not submit_status:
         ToStderr("Failed to submit job for %s: %s", name, jid)
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 2f475579cd1bd893dcfb7db9c151b1061e3c7bcb..3872d7c58507ebbe2ac6cc54e3ffb7d9af66bf0a 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -511,7 +511,7 @@ def _CheckGlobalHvParams(params):
   if used_globals:
     msg = ("The following hypervisor parameters are global and cannot"
            " be customized at instance level, please modify them at"
-           " cluster level: %s" % ", ".join(used_globals))
+           " cluster level: %s" % utils.CommaJoin(used_globals))
     raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


@@ -698,7 +698,7 @@ def _AdjustCandidatePool(lu, exceptions):
   mod_list = lu.cfg.MaintainCandidatePool(exceptions)
   if mod_list:
     lu.LogInfo("Promoted nodes to master candidate role: %s",
-               ", ".join(node.name for node in mod_list))
+               utils.CommaJoin(node.name for node in mod_list))
     for name in mod_list:
       lu.context.ReaddNode(name)
   mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
@@ -1498,7 +1498,7 @@ class LUVerifyCluster(LogicalUnit):
       # warn that the instance lives on offline nodes
       _ErrorIf(inst_nodes_offline, self.EINSTANCEBADNODE, instance,
                "instance lives on offline node(s) %s",
-               ", ".join(inst_nodes_offline))
+               utils.CommaJoin(inst_nodes_offline))

     feedback_fn("* Verifying orphan volumes")
     self._VerifyOrphanVolumes(node_vol_should, node_volume)
@@ -5844,7 +5844,7 @@ class LUCreateInstance(LogicalUnit):
     self.op.pnode = ial.nodes[0]
     self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                  self.op.instance_name, self.op.iallocator,
-                 ", ".join(ial.nodes))
+                 utils.CommaJoin(ial.nodes))
     if ial.required_nodes == 2:
       self.op.snode = ial.nodes[1]

@@ -6631,7 +6631,7 @@ class TLReplaceDisks(Tasklet):
       return

     feedback_fn("Replacing disk(s) %s for %s" %
-                (", ".join([str(i) for i in self.disks]), self.instance.name))
+                (utils.CommaJoin(self.disks), self.instance.name))

     activate_disks = (not self.instance.admin_up)

diff --git a/lib/config.py b/lib/config.py
index 5374c3982796b1c2fb551b1c5c42c691498ba38a..a55906fd624d293f8eb97a9fdd432debd9659507 100644
--- a/lib/config.py
+++ b/lib/config.py
@@ -397,7 +397,7 @@ class ConfigWriter:
     for pnum in keys:
       pdata = ports[pnum]
       if len(pdata) > 1:
-        txt = ", ".join(["%s/%s" % val for val in pdata])
+        txt = utils.CommaJoin(["%s/%s" % val for val in pdata])
         result.append("tcp/udp port %s has duplicates: %s" % (pnum, txt))

     # highest used tcp port check
@@ -465,7 +465,7 @@ class ConfigWriter:
     for ip, owners in ips.items():
       if len(owners) > 1:
         result.append("IP address %s is used by multiple owners: %s" %
-                      (ip, ", ".join(owners)))
+                      (ip, utils.CommaJoin(owners)))

     return result

@@ -1272,7 +1272,7 @@ class ConfigWriter:
     config_errors = self._UnlockedVerifyConfig()
     if config_errors:
       errmsg = ("Configuration data is not consistent: %s" %
-                (", ".join(config_errors)))
+                (utils.CommaJoin(config_errors)))
       logging.critical(errmsg)
       if feedback_fn:
         feedback_fn(errmsg)
@@ -1442,4 +1442,3 @@ class ConfigWriter:
     self._temporary_ids.DropECReservations(ec_id)
     self._temporary_macs.DropECReservations(ec_id)
     self._temporary_secrets.DropECReservations(ec_id)
-
diff --git a/lib/jqueue.py b/lib/jqueue.py
index ed862dda2284ede832d644f5339f0cdbcfbf0000..13f112fff11b877399e2839d6154854d0775a2c1 100644
--- a/lib/jqueue.py
+++ b/lib/jqueue.py
@@ -1260,7 +1260,7 @@ class JobQueue(object):
       self._RenameFilesUnlocked(rename_files)

     logging.debug("Successfully archived job(s) %s",
-                  ", ".join(job.id for job in archive_jobs))
+                  utils.CommaJoin(job.id for job in archive_jobs))

     return len(archive_jobs)

diff --git a/lib/locking.py b/lib/locking.py
index 37b2a8b7a7e5e4f3b8bdda550ead3bfb188dd5d8..08399a1e157129d6691a57e6fbfa97b41d7a20b6 100644
--- a/lib/locking.py
+++ b/lib/locking.py
@@ -1267,8 +1267,8 @@ class GanetiLockManager:
             not self._upper_owned(LEVEL_CLUSTER)), (
             "Cannot release the Big Ganeti Lock while holding something"
             " at upper levels (%r)" %
-            (", ".join(["%s=%r" % (LEVEL_NAMES[i], self._list_owned(i))
-                        for i in self.__keyring.keys()]), ))
+            (utils.CommaJoin(["%s=%r" % (LEVEL_NAMES[i], self._list_owned(i))
+                              for i in self.__keyring.keys()]), ))

     # Release will complain if we don't own the locks already
     return self.__keyring[level].release(names)
diff --git a/lib/utils.py b/lib/utils.py
index b2ffff9bfc309c3c8a0acdf2c26c78965693a411..91baa8849da1d333e2333b98df3e48c868a80e73 100644
--- a/lib/utils.py
+++ b/lib/utils.py
@@ -1891,7 +1891,7 @@ def CommaJoin(names):
   @return: a string with the formatted results

   """
-  return ", ".join(["'%s'" % val for val in names])
+  return ", ".join([str(val) for val in names])


 def BytesToMebibyte(value):
diff --git a/scripts/gnt-cluster b/scripts/gnt-cluster
index f6101ba3735a593cb3e032432dc1793d6c07b0fa..85a2a127e1181ee6f921eede0a08d10032fcf98b 100755
--- a/scripts/gnt-cluster
+++ b/scripts/gnt-cluster
@@ -240,14 +240,15 @@ def ShowClusterConfig(opts, args):
            result["architecture"][0], result["architecture"][1])

   if result["tags"]:
-    tags = ", ".join(utils.NiceSort(result["tags"]))
+    tags = utils.CommaJoin(utils.NiceSort(result["tags"]))
   else:
     tags = "(none)"

   ToStdout("Tags: %s", tags)

   ToStdout("Default hypervisor: %s", result["default_hypervisor"])
-  ToStdout("Enabled hypervisors: %s", ", ".join(result["enabled_hypervisors"]))
+  ToStdout("Enabled hypervisors: %s",
+           utils.CommaJoin(result["enabled_hypervisors"]))

   ToStdout("Hypervisor parameters:")
   _PrintGroupedParams(result["hvparams"])
diff --git a/scripts/gnt-instance b/scripts/gnt-instance
index ead004f82e41a810ce4a01dc1eab93d59daa14fb..44b84101b973c5bcb16825d1cea86062c5de0b4d 100755
--- a/scripts/gnt-instance
+++ b/scripts/gnt-instance
@@ -1120,7 +1120,7 @@ def ShowInstanceConfig(opts, args):
     ##          instance["auto_balance"])
     buf.write("  Nodes:\n")
     buf.write("    - primary: %s\n" % instance["pnode"])
-    buf.write("    - secondaries: %s\n" % ", ".join(instance["snodes"]))
+    buf.write("    - secondaries: %s\n" % utils.CommaJoin(instance["snodes"]))
     buf.write("  Operating system: %s\n" % instance["os"])
     if instance.has_key("network_port"):
       buf.write("  Allocated network port: %s\n" % instance["network_port"])
@@ -1336,7 +1336,7 @@ commands = {
     " hv/NAME, be/memory, be/vcpus, be/auto_balance,"
     " hypervisor."
     " The default field"
-    " list is (in order): %s." % ", ".join(_LIST_DEF_FIELDS),
+    " list is (in order): %s." % utils.CommaJoin(_LIST_DEF_FIELDS),
     ),
   'reinstall': (
     ReinstallInstance, [ArgInstance()],
diff --git a/scripts/gnt-job b/scripts/gnt-job
index 80def4b27745897211020e1f38317807a89bf2f4..44ae4b2dbf655c59c33116236001811353485a7c 100755
--- a/scripts/gnt-job
+++ b/scripts/gnt-job
@@ -203,7 +203,7 @@ def ShowJobs(opts, args):
   def result_helper(value):
     """Format a result field in a nice way."""
     if isinstance(value, (tuple, list)):
-      return "[%s]" % (", ".join(str(elem) for elem in value))
+      return "[%s]" % utils.CommaJoin(value)
     else:
       return str(value)

@@ -351,7 +351,7 @@ commands = {
     " (see the man page for details): id, status, op_list,"
     " op_status, op_result."
     " The default field"
-    " list is (in order): %s." % ", ".join(_LIST_DEF_FIELDS)),
+    " list is (in order): %s." % utils.CommaJoin(_LIST_DEF_FIELDS)),
   'archive': (
     ArchiveJobs, [ArgJobId(min=1)], [],
     "<job-id> [<job-id> ...]", "Archive specified jobs"),
diff --git a/scripts/gnt-node b/scripts/gnt-node
index 0ca27942527e5b72395cd2a137e1cbca41b69595..7e3e439608c24df3573b398594ee1bc43f40a925 100755
--- a/scripts/gnt-node
+++ b/scripts/gnt-node
@@ -677,7 +677,7 @@ commands = {
     "[nodes...]",
     "Lists the nodes in the cluster. The available fields are (see the man"
     " page for details): %s. The default field list is (in order): %s." %
-    (", ".join(_LIST_HEADERS), ", ".join(_LIST_DEF_FIELDS))),
+    (utils.CommaJoin(_LIST_HEADERS), utils.CommaJoin(_LIST_DEF_FIELDS))),
   'modify': (
     SetNodeParams, ARGS_ONE_NODE,
     [FORCE_OPT, SUBMIT_OPT, MC_OPT, DRAINED_OPT, OFFLINE_OPT],
@@ -698,7 +698,7 @@ commands = {
     [NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, _STORAGE_TYPE_OPT],
     "[<node_name>...]", "List physical volumes on node(s). The available"
     " fields are (see the man page for details): %s." %
-    (", ".join(_LIST_STOR_HEADERS))),
+    (utils.CommaJoin(_LIST_STOR_HEADERS))),
   'modify-storage': (
     ModifyStorage,
     [ArgNode(min=1, max=1),
diff --git a/scripts/gnt-os b/scripts/gnt-os
index e5b0fed2e3f2c072a19cf973e679a8d2075ea745..f3667c47b03f768491b6c1b2c6f8d3950e433f7c 100755
--- a/scripts/gnt-os
+++ b/scripts/gnt-os
@@ -117,7 +117,7 @@ def DiagnoseOS(opts, args):
           first_os_variants = []
         first_os_msg = ("%s (path: %s) [variants: %s]" %
                         (_OsStatus(first_os_status, first_os_msg),
-                         first_os_path, ', '.join(first_os_variants)))
+                         first_os_path, utils.CommaJoin(first_os_variants)))
         if first_os_status:
           nodes_valid[node_name] = first_os_msg
         else:
@@ -146,7 +146,7 @@ def DiagnoseOS(opts, args):

     ToStdout("OS: %s [global status: %s]", os_name, status)
     if os_variants:
-      ToStdout("  Variants: [%s]" % ', '.join(os_variants))
+      ToStdout("  Variants: [%s]" % utils.CommaJoin(os_variants))
     _OutputPerNodeOSStatus(nodes_valid)
     _OutputPerNodeOSStatus(nodes_bad)
     ToStdout("")
diff --git a/tools/burnin b/tools/burnin
index 395edbf65a076ecf2cc60ab4165dfa1717d9d66d..c442957a0bae80a870397aeb39a96b1b67414611 100755
--- a/tools/burnin
+++ b/tools/burnin
@@ -336,7 +336,7 @@ class Burner(object):
     """
     self.ClearFeedbackBuf()
     job_ids = [cli.SendJob(row[0], cl=self.cl) for row in jobs]
-    Log("Submitted job ID(s) %s" % ", ".join(job_ids), indent=1)
+    Log("Submitted job ID(s) %s" % utils.CommaJoin(job_ids), indent=1)
    results = []
     for jid, (_, iname) in zip(job_ids, jobs):
       Log("waiting for job %s for %s" % (jid, iname), indent=2)