Commit 77921a95 authored by Iustin Pop's avatar Iustin Pop
Browse files

Disable synchronous (locking) queries

This patch raises an error in the master daemon in case the user
requests a locking query; accordingly, all clients were modified to send
only lockless queries. This is a short-term fix; for a proper fix, the
clients should be modified to submit a job when the user requests a
locking query.

The other approach would be to ignore the flag passed by the client;
this would be worse, as clients wouldn't even get an error.

The possible impact of this is multiple:
  - some commands might not have been converted, and would thus fail;
    this can be remedied easily
  - the consistency of commands is lost; e.g. node failover will not
    lock the node *while we get the node info*, so we could miss some
    data; this is again in the thread of atomic operations which are
    missing in the current model of query-and-act from gnt-* scripts

Reviewed-by: imsnah, ultrotter
parent 2c404217
......@@ -247,6 +247,8 @@ class ClientOps:
elif method == luxi.REQ_QUERY_INSTANCES:
(names, fields, use_locking) = args"Received instance query request for %s", names)
if use_locking:
raise errors.OpPrereqError("Sync queries are not allowed")
op = opcodes.OpQueryInstances(names=names, output_fields=fields,
return self._Query(op)
......@@ -254,12 +256,16 @@ class ClientOps:
elif method == luxi.REQ_QUERY_NODES:
(names, fields, use_locking) = args"Received node query request for %s", names)
if use_locking:
raise errors.OpPrereqError("Sync queries are not allowed")
op = opcodes.OpQueryNodes(names=names, output_fields=fields,
return self._Query(op)
elif method == luxi.REQ_QUERY_EXPORTS:
nodes, use_locking = args
if use_locking:
raise errors.OpPrereqError("Sync queries are not allowed")"Received exports query request")
op = opcodes.OpQueryExports(nodes=nodes, use_locking=use_locking)
return self._Query(op)
......@@ -45,7 +45,7 @@ def PrintExportList(opts, args):
@return: the desired exit code
exports = GetClient().QueryExports(opts.nodes, True)
exports = GetClient().QueryExports(opts.nodes, False)
retcode = 0
for node in exports:
ToStdout("Node: %s", node)
......@@ -90,7 +90,7 @@ def _ExpandMultiNames(mode, names, client=None):
if not names:
raise errors.OpPrereqError("No node names passed")
ndata = client.QueryNodes(names, ["name", "pinst_list", "sinst_list"],
ipri = [row[1] for row in ndata]
pri_names = list(itertools.chain(*ipri))
isec = [row[2] for row in ndata]
......@@ -77,7 +77,7 @@ def AddNode(opts, args):
output = cl.QueryNodes(names=[node], fields=['name', 'sip'],
node_exists, sip = output[0]
except (errors.OpPrereqError, errors.OpExecError):
node_exists = ""
......@@ -206,7 +206,7 @@ def EvacuateNode(opts, args):
src_node = args[0]
result = cl.QueryNodes(names=[src_node], fields=selected_fields,
src_node, sinst = result[0]
if not sinst:
......@@ -214,7 +214,8 @@ def EvacuateNode(opts, args):
return constants.EXIT_SUCCESS
if dst_node is not None:
result = cl.QueryNodes(names=[dst_node], fields=["name"], use_locking=True)
result = cl.QueryNodes(names=[dst_node], fields=["name"],
dst_node = result[0][0]
if src_node == dst_node:
......@@ -263,7 +264,7 @@ def FailoverNode(opts, args):
# these fields are static data anyway, so it doesn't matter, but
# locking=True should be safer
result = cl.QueryNodes(names=args, fields=selected_fields,
node, pinst = result[0]
if not pinst:
......@@ -301,7 +302,7 @@ def MigrateNode(opts, args):
force = opts.force
selected_fields = ["name", "pinst_list"]
result = cl.QueryNodes(names=args, fields=selected_fields, use_locking=True)
result = cl.QueryNodes(names=args, fields=selected_fields, use_locking=False)
node, pinst = result[0]
if not pinst:
......@@ -348,7 +349,7 @@ def ShowNodeConfig(opts, args):
result = cl.QueryNodes(fields=["name", "pip", "sip",
"pinst_list", "sinst_list",
"master_candidate", "drained", "offline"],
names=args, use_locking=True)
names=args, use_locking=False)
for (name, primary_ip, secondary_ip, pinst, sinst,
is_mc, drained, offline) in result:
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment