Commit 3cebe102 authored by Michael Hanselmann

Remove RpcResult.RemoteFailMsg completely

Signed-off-by: Michael Hanselmann <hansmi@google.com>
Reviewed-by: Iustin Pop <iustin@google.com>
parent 5ee09f03
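
The change is mechanical: every call site that previously went through the RemoteFailMsg() wrapper now reads the fail_msg attribute directly. Below is a minimal, runnable sketch of the resulting call-site pattern; RpcResult here is a simplified stand-in (only the fail_msg and payload attributes mirror the real object), and handle_result plus the node names are purely illustrative.

import logging

class RpcResult(object):
    """Simplified stand-in for Ganeti's RPC result object.

    Only fail_msg and payload are modelled; fail_msg is None (or empty)
    when the remote call succeeded.
    """

    def __init__(self, fail_msg=None, payload=None):
        self.fail_msg = fail_msg
        self.payload = payload


def handle_result(result, node):
    """Mirrors the check-and-log pattern used at the call sites in this diff."""
    msg = result.fail_msg          # was: result.RemoteFailMsg()
    if msg:
        logging.warning("Error contacting node %s: %s", node, msg)
        return None
    return result.payload


handle_result(RpcResult(payload={"vg_free": 1024}), "node1.example.com")
handle_result(RpcResult(fail_msg="connection refused"), "node2.example.com")
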
@@ -566,7 +566,7 @@ def ExecMasterd (options, args):
# activate ip
master_node = ssconf.SimpleStore().GetMasterNode()
result = rpc.RpcRunner.call_node_start_master(master_node, False, False)
msg = result.RemoteFailMsg()
msg = result.fail_msg
if msg:
logging.error("Can't activate master IP address: %s", msg)
......
@@ -323,11 +323,11 @@ def FinalizeClusterDestroy(master):
"""
result = rpc.RpcRunner.call_node_stop_master(master, True)
msg = result.RemoteFailMsg()
msg = result.fail_msg
if msg:
logging.warning("Could not disable the master role: %s" % msg)
result = rpc.RpcRunner.call_node_leave_cluster(master)
msg = result.RemoteFailMsg()
msg = result.fail_msg
if msg:
logging.warning("Could not shutdown the node daemon and cleanup"
" the node: %s", msg)
@@ -442,7 +442,7 @@ def MasterFailover(no_voting=False):
logging.info("Setting master to %s, old master: %s", new_master, old_master)
result = rpc.RpcRunner.call_node_stop_master(old_master, True)
msg = result.RemoteFailMsg()
msg = result.fail_msg
if msg:
logging.error("Could not disable the master role on the old master"
" %s, please disable manually: %s", old_master, msg)
@@ -460,7 +460,7 @@ def MasterFailover(no_voting=False):
cfg.Update(cluster_info)
result = rpc.RpcRunner.call_node_start_master(new_master, True, no_voting)
msg = result.RemoteFailMsg()
msg = result.fail_msg
if msg:
logging.error("Could not start the master role on the new master"
" %s, please check: %s", new_master, msg)
@@ -523,7 +523,7 @@ def GatherMasterVotes(node_list):
for node in results:
nres = results[node]
data = nres.payload
msg = nres.RemoteFailMsg()
msg = nres.fail_msg
fail = False
if msg:
logging.warning("Error contacting node %s: %s", node, msg)
......
@@ -1641,7 +1641,7 @@ class LURepairDiskSizes(NoHooksLU):
changed = []
for node, dskl in per_node_disks.items():
result = self.rpc.call_blockdev_getsizes(node, [v[2] for v in dskl])
if result.RemoteFailMsg():
if result.fail_msg:
self.LogWarning("Failure in blockdev_getsizes call to node"
" %s, ignoring", node)
continue
@@ -2885,7 +2885,7 @@ class LUAddNode(LogicalUnit):
# and make sure the new node will not have old files around
if not new_node.master_candidate:
result = self.rpc.call_node_demote_from_mc(new_node.name)
msg = result.RemoteFailMsg()
msg = result.fail_msg
if msg:
self.LogWarning("Node failed to demote itself from master"
" candidate status: %s" % msg)
@@ -3013,7 +3013,7 @@ class LUSetNodeParams(LogicalUnit):
changed_mc = True
result.append(("master_candidate", "auto-demotion due to drain"))
rrc = self.rpc.call_node_demote_from_mc(node.name)
msg = rrc.RemoteFailMsg()
msg = rrc.fail_msg
if msg:
self.LogWarning("Node failed to demote itself: %s" % msg)
if node.offline:
@@ -4077,7 +4077,7 @@ class LUQueryInstances(NoHooksLU):
if result.offline:
# offline nodes will be in both lists
off_nodes.append(name)
if result.RemoteFailMsg():
if result.fail_msg:
bad_nodes.append(name)
else:
if result.payload:
......
@@ -1090,7 +1090,7 @@ class ConfigWriter:
result = rpc.RpcRunner.call_upload_file(node_list, self._cfg_file,
address_list=addr_list)
for to_node, to_result in result.items():
msg = to_result.RemoteFailMsg()
msg = to_result.fail_msg
if msg:
msg = ("Copy of file %s to node %s failed: %s" %
(self._cfg_file, to_node, msg))
@@ -1126,7 +1126,7 @@ class ConfigWriter:
self._UnlockedGetNodeList(),
self._UnlockedGetSsconfValues())
for nname, nresu in result.items():
msg = nresu.RemoteFailMsg()
msg = nresu.fail_msg
if msg:
logging.warning("Error while uploading ssconf files to"
" node %s: %s", nname, msg)
......
@@ -673,7 +673,7 @@ class JobQueue(object):
# Clean queue directory on added node
result = rpc.RpcRunner.call_jobqueue_purge(node_name)
msg = result.RemoteFailMsg()
msg = result.fail_msg
if msg:
logging.warning("Cannot cleanup queue directory on node %s: %s",
node_name, msg)
@@ -697,7 +697,7 @@ class JobQueue(object):
result = rpc.RpcRunner.call_jobqueue_update([node_name],
[node.primary_ip],
file_name, content)
msg = result[node_name].RemoteFailMsg()
msg = result[node_name].fail_msg
if msg:
logging.error("Failed to upload file %s to node %s: %s",
file_name, node_name, msg)
@@ -737,7 +737,7 @@ class JobQueue(object):
success = []
for node in nodes:
msg = result[node].RemoteFailMsg()
msg = result[node].fail_msg
if msg:
failed.append(node)
logging.error("RPC call %s failed on node %s: %s",
......
@@ -448,7 +448,7 @@ class HooksMaster(object):
res = results[node_name]
if res.offline:
continue
msg = res.RemoteFailMsg()
msg = res.fail_msg
if msg:
self.lu.LogWarning("Communication failure to node %s: %s",
node_name, msg)
......
@@ -147,14 +147,6 @@ class RpcResult(object):
ec = errors.OpExecError
raise ec(msg)
def RemoteFailMsg(self):
"""Check if the remote procedure failed.
@return: the fail_msg attribute
"""
return self.fail_msg
class Client:
"""RPC Client class.
......
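
For reference, the removed wrapper did nothing beyond returning the attribute, which is why every caller can switch to fail_msg without any change in behavior. A hedged illustration of that equivalence follows, using a throwaway _Result class rather than Ganeti's actual implementation.

class _Result(object):
    # Throwaway illustration class, not Ganeti's RpcResult.
    def __init__(self, fail_msg=None):
        self.fail_msg = fail_msg

    def RemoteFailMsg(self):
        # The removed method was exactly this one-line delegation.
        return self.fail_msg


r = _Result(fail_msg="node unreachable")
assert r.RemoteFailMsg() == r.fail_msg  # same information, one redundant call
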