diff --git a/daemons/ganeti-noded b/daemons/ganeti-noded
index 0678d114d169bcab42547d06185f8d7b6f43ce82..6ec61dc59757c047195c37e542fe32fa8ce1eea7 100755
--- a/daemons/ganeti-noded
+++ b/daemons/ganeti-noded
@@ -476,7 +476,7 @@ class NodeHttpServer(http.server.HttpServer):
     """Query the list of running instances.
 
     """
-    return backend.GetInstanceList(params[0])
+    return True, backend.GetInstanceList(params[0])
 
   # node --------------------------
 
diff --git a/lib/backend.py b/lib/backend.py
index 14d600d5d30cf7d15ac45b852d368d902f783e72..a343649366161bd30422352413eb0c2e2cbaf7d2 100644
--- a/lib/backend.py
+++ b/lib/backend.py
@@ -603,8 +603,8 @@ def GetInstanceList(hypervisor_list):
       names = hypervisor.GetHypervisor(hname).ListInstances()
       results.extend(names)
     except errors.HypervisorError, err:
-      logging.exception("Error enumerating instances for hypevisor %s", hname)
-      raise
+      _Fail("Error enumerating instances (hypervisor %s): %s",
+            hname, err, exc=True)
 
   return results
 
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index cc22e3d2f5a362cae1d26f2027dda662bbf67b69..1ca4528c1ef4d2e66f22f971aefd7ed569727d05 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -2769,14 +2769,14 @@ def _SafeShutdownInstanceDisks(lu, instance):
   _ShutdownInstanceDisks.
 
   """
-  ins_l = lu.rpc.call_instance_list([instance.primary_node],
-                                    [instance.hypervisor])
-  ins_l = ins_l[instance.primary_node]
-  if ins_l.failed or not isinstance(ins_l.data, list):
-    raise errors.OpExecError("Can't contact node '%s'" %
-                             instance.primary_node)
-
-  if instance.name in ins_l.data:
+  pnode = instance.primary_node
+  ins_l = lu.rpc.call_instance_list([pnode], [instance.hypervisor])
+  ins_l = ins_l[pnode]
+  msg = ins_l.RemoteFailMsg()
+  if msg:
+    raise errors.OpExecError("Can't contact node %s: %s" % (pnode, msg))
+
+  if instance.name in ins_l.payload:
     raise errors.OpExecError("Instance is running, can't shutdown"
                              " block devices.")
 
@@ -3890,12 +3890,12 @@ class LUMigrateInstance(LogicalUnit):
                      " a bad state)")
     ins_l = self.rpc.call_instance_list(self.all_nodes, [instance.hypervisor])
     for node, result in ins_l.items():
-      result.Raise()
-      if not isinstance(result.data, list):
-        raise errors.OpExecError("Can't contact node '%s'" % node)
+      msg = result.RemoteFailMsg()
+      if msg:
+        raise errors.OpExecError("Can't contact node %s: %s" % (node, msg))
 
-    runningon_source = instance.name in ins_l[source_node].data
-    runningon_target = instance.name in ins_l[target_node].data
+    runningon_source = instance.name in ins_l[source_node].payload
+    runningon_target = instance.name in ins_l[target_node].payload
 
     if runningon_source and runningon_target:
       raise errors.OpExecError("Instance seems to be running on two nodes,"
@@ -5010,9 +5010,12 @@ class LUConnectConsole(NoHooksLU):
 
     node_insts = self.rpc.call_instance_list([node],
                                              [instance.hypervisor])[node]
-    node_insts.Raise()
+    msg = node_insts.RemoteFailMsg()
+    if msg:
+      raise errors.OpExecError("Can't get node information from %s: %s" %
+                               (node, msg))
 
-    if instance.name not in node_insts.data:
+    if instance.name not in node_insts.payload:
       raise errors.OpExecError("Instance %s is not running." % instance.name)
 
     logging.debug("Connecting to console of %s on %s", instance.name, node)
@@ -6217,9 +6220,11 @@ class LUSetInstanceParams(LogicalUnit):
                                      " an instance")
         ins_l = self.rpc.call_instance_list([pnode], [instance.hypervisor])
         ins_l = ins_l[pnode]
-        if ins_l.failed or not isinstance(ins_l.data, list):
-          raise errors.OpPrereqError("Can't contact node '%s'" % pnode)
-        if instance.name in ins_l.data:
+        msg = ins_l.RemoteFailMsg()
+        if msg:
+          raise errors.OpPrereqError("Can't contact node %s: %s" %
+                                     (pnode, msg))
+        if instance.name in ins_l.payload:
           raise errors.OpPrereqError("Instance is running, can't remove"
                                      " disks.")
 
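
A minimal sketch of the calling convention this patch converts call_instance_list users to: the node daemon now returns a (status, payload) tuple, and master-side callers check RemoteFailMsg() and read .payload instead of the old .failed / .data pair and Raise(). The FakeRpcResult class and CheckInstanceRunning helper below are simplified stand-ins for illustration, not Ganeti's real rpc module.

class FakeRpcResult(object):
  """Simplified stand-in for the master-side RPC result wrapper."""

  def __init__(self, node_result):
    # node_result is the (status, payload) tuple sent back by the node daemon
    self._status, self.payload = node_result

  def RemoteFailMsg(self):
    """Return an error message if the remote call failed, else None."""
    if not self._status:
      return str(self.payload)
    return None


def CheckInstanceRunning(result, instance_name, node):
  """Consumer following the pattern the cmdlib.py hunks above switch to."""
  msg = result.RemoteFailMsg()
  if msg:
    raise RuntimeError("Can't contact node %s: %s" % (node, msg))
  return instance_name in result.payload


# Example: a successful reply listing two running instances.
res = FakeRpcResult((True, ["inst1.example.com", "inst2.example.com"]))
assert CheckInstanceRunning(res, "inst1.example.com", "node1.example.com")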