Commit a9f33339 authored by Petr Pudlak, committed by Petr Pudlak
Browse files

Use custom SSH ports in node groups when working with nodes

Calling `gnt-instance console` with a custom SSH port doesn't work yet.
Signed-off-by: Petr Pudlak <>
Reviewed-by: Hrvoje Ribicic <>
parent f2152285
......@@ -929,7 +929,7 @@ def _VerifyNodeInfo(what, vm_capable, result, all_hvparams):
result[constants.NV_HVINFO] = hyper.GetNodeInfo(hvparams=hvparams)
def VerifyNode(what, cluster_name, all_hvparams):
def VerifyNode(what, cluster_name, all_hvparams, node_groups, groups_cfg):
"""Verify the status of the local node.
Based on the input L{what} parameter, various checks are done on the
......@@ -957,6 +957,11 @@ def VerifyNode(what, cluster_name, all_hvparams):
@param cluster_name: the cluster's name
@type all_hvparams: dict of dict of strings
@param all_hvparams: a dictionary mapping hypervisor names to hvparams
@type node_groups: a dict of strings
@param node_groups: node _names_ mapped to their group uuids (it's enough to
have only those nodes that are in `what["nodelist"]`)
@type groups_cfg: a dict of dict of strings
@param groups_cfg: a dictionary mapping group uuids to their configuration
@rtype: dict
@return: a dictionary with the same keys as the input dict, and
values representing the result of the checks
......@@ -992,7 +997,12 @@ def VerifyNode(what, cluster_name, all_hvparams):
# Try to contact all nodes
val = {}
for node in nodes:
success, message = _GetSshRunner(cluster_name).VerifyNodeHostname(node)
params = groups_cfg.get(node_groups.get(node))
ssh_port = params["ndparams"].get(constants.ND_SSH_PORT)
logging.debug("Ssh port %s (None = default) for node %s",
str(ssh_port), node)
success, message = _GetSshRunner(cluster_name). \
VerifyNodeHostname(node, ssh_port)
if not success:
val[node] = message
......@@ -280,7 +280,8 @@ def _WaitForSshDaemon(hostname, port, family):
def RunNodeSetupCmd(cluster_name, node, basecmd, debug, verbose,
use_cluster_key, ask_key, strict_host_check, data):
use_cluster_key, ask_key, strict_host_check,
port, data):
"""Runs a command to configure something on a remote machine.
@type cluster_name: string
......@@ -299,6 +300,8 @@ def RunNodeSetupCmd(cluster_name, node, basecmd, debug, verbose,
@param ask_key: See L{ssh.SshRunner.BuildCmd}
@type strict_host_check: bool
@param strict_host_check: See L{ssh.SshRunner.BuildCmd}
@type port: int
@param port: The SSH port of the remote machine or None for the default
@param data: JSON-serializable input data for script (passed to stdin)
......@@ -311,6 +314,9 @@ def RunNodeSetupCmd(cluster_name, node, basecmd, debug, verbose,
if verbose:
if port is None:
port = netutils.GetDaemonPort(constants.SSH)
family = ssconf.SimpleStore().GetPrimaryIPFamily()
srun = ssh.SshRunner(cluster_name,
ipv6=(family ==
......@@ -318,7 +324,8 @@ def RunNodeSetupCmd(cluster_name, node, basecmd, debug, verbose,
batch=False, ask_key=ask_key, quiet=False,
tempfh = tempfile.TemporaryFile()
......@@ -333,7 +340,7 @@ def RunNodeSetupCmd(cluster_name, node, basecmd, debug, verbose,
raise errors.OpExecError("Command '%s' failed: %s" %
(result.cmd, result.fail_reason))
_WaitForSshDaemon(node, netutils.GetDaemonPort(constants.SSH), family)
_WaitForSshDaemon(node, port, family)
def _InitFileStorageDir(file_storage_dir):
......@@ -874,7 +881,7 @@ def FinalizeClusterDestroy(master_uuid):
" the node: %s", msg)
def SetupNodeDaemon(opts, cluster_name, node):
def SetupNodeDaemon(opts, cluster_name, node, ssh_port):
"""Add a node to the cluster.
This function must be called before the actual opcode, and will ssh
......@@ -883,6 +890,7 @@ def SetupNodeDaemon(opts, cluster_name, node):
@param cluster_name: the cluster name
@param node: the name of the new node
@param ssh_port: the SSH port of the new node
data = {
......@@ -895,7 +903,8 @@ def SetupNodeDaemon(opts, cluster_name, node):
RunNodeSetupCmd(cluster_name, node, pathutils.NODE_DAEMON_SETUP,
opts.debug, opts.verbose,
True, opts.ssh_key_check, opts.ssh_key_check, data)
True, opts.ssh_key_check, opts.ssh_key_check,
ssh_port, data)
......@@ -899,6 +899,7 @@ def _DoConsole(console, show_command, cluster_name, feedback_fn=ToStdout,
srun = ssh.SshRunner(cluster_name=cluster_name)
ssh_cmd = srun.BuildCmd(console.host, console.user, cmd,
batch=True, quiet=False, tty=True)
if show_command:
......@@ -193,7 +193,7 @@ def _ReadSshKeys(keyfiles, _tostderr_fn=ToStderr):
return result
def _SetupSSH(options, cluster_name, node):
def _SetupSSH(options, cluster_name, node, ssh_port):
"""Configures a destination node's SSH daemon.
@param options: Command line options
......@@ -201,6 +201,8 @@ def _SetupSSH(options, cluster_name, node):
@param cluster_name: Cluster name
@type node: string
@param node: Destination node name
@type ssh_port: int
@param ssh_port: Destination node ssh port
if options.force_join:
......@@ -226,7 +228,8 @@ def _SetupSSH(options, cluster_name, node):
bootstrap.RunNodeSetupCmd(cluster_name, node, pathutils.PREPARE_NODE_JOIN,
options.debug, options.verbose, False,
options.ssh_key_check, options.ssh_key_check, data)
options.ssh_key_check, options.ssh_key_check,
ssh_port, data)
......@@ -244,10 +247,21 @@ def AddNode(opts, args):
node = netutils.GetHostname(name=args[0]).name
readd = opts.readd
# Retrieve relevant parameters of the node group.
ssh_port = None
if opts.nodegroup:
output = cl.QueryGroups(names=[opts.nodegroup], fields=["ndp/ssh_port"],
(ssh_port, ) = output[0]
except (errors.OpPrereqError, errors.OpExecError):
output = cl.QueryNodes(names=[node], fields=["name", "sip", "master"],
output = cl.QueryNodes(names=[node],
fields=["name", "sip", "master", "ndp/ssh_port"],
node_exists, sip, is_master = output[0]
node_exists, sip, is_master, ssh_port = output[0]
except (errors.OpPrereqError, errors.OpExecError):
node_exists = ""
sip = None
......@@ -279,9 +293,9 @@ def AddNode(opts, args):
"and grant full intra-cluster ssh root access to/from it\n", node)
if opts.node_setup:
_SetupSSH(opts, cluster_name, node)
_SetupSSH(opts, cluster_name, node, ssh_port)
bootstrap.SetupNodeDaemon(opts, cluster_name, node)
bootstrap.SetupNodeDaemon(opts, cluster_name, node, ssh_port)
if opts.disk_state:
disk_state = utils.FlatToDict(opts.disk_state)
......@@ -3023,6 +3023,10 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
if self._exclusive_storage:
node_verify_param[constants.NV_EXCLUSIVEPVS] = True
node_group_uuids = dict(map(lambda n: (,,
groups_config = self.cfg.GetAllNodeGroupsInfoDict()
# At this point, we have the in-memory data structures complete,
# except for the runtime information, which we'll gather next
......@@ -3034,7 +3038,9 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
all_nvinfo = self.rpc.call_node_verify(self.my_node_uuids,
nvinfo_endtime = time.time()
if self.extra_lv_nodes and vg_name is not None:
......@@ -3042,7 +3048,9 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
{constants.NV_LVLIST: vg_name},
extra_lv_nvinfo = {}
......@@ -3077,7 +3085,9 @@ class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
key = constants.NV_FILELIST
additional_node_uuids, {key: node_verify_param[key]},
self.cfg.GetClusterName(), self.cfg.GetClusterInfo().hvparams))
self.cfg.GetClusterName(), self.cfg.GetClusterInfo().hvparams,
vf_nvinfo = all_nvinfo
vf_node_info = self.my_node_info.values()
......@@ -270,10 +270,11 @@ class LUNodeAdd(LogicalUnit):
self.master_candidate = False
node_group = self.cfg.LookupNodeGroup(
if self.op.readd:
self.new_node = existing_node_info
node_group = self.cfg.LookupNodeGroup(
self.new_node = objects.Node(name=node_name,
......@@ -313,7 +314,10 @@ class LUNodeAdd(LogicalUnit):
cname = self.cfg.GetClusterName()
result = rpcrunner.call_node_verify_light(
[node_name], vparams, cname,
{node_name: node_group},
(errmsgs, _) = CheckNodePVs(result.payload, excl_stor)
if errmsgs:
raise errors.OpPrereqError("Checks on node PVs failed: %s" %
......@@ -381,7 +385,10 @@ class LUNodeAdd(LogicalUnit):
result = self.rpc.call_node_verify(
node_verifier_uuids, node_verify_param,
{ self.cfg.LookupNodeGroup(},
for verifier in node_verifier_uuids:
result[verifier].Raise("Cannot communicate with node %s" % verifier)
nl_payload = result[verifier].payload[constants.NV_NODELIST]
......@@ -1373,6 +1373,15 @@ class ConfigWriter(object):
return self._UnlockedGetAllNodeGroupsInfo()
@locking.ssynchronized(_config_lock, shared=1)
def GetAllNodeGroupsInfoDict(self):
"""Get the configuration of all node groups expressed as a dictionary of
return dict(map(lambda (uuid, ng): (uuid, ng.ToDict()),
@locking.ssynchronized(_config_lock, shared=1)
def GetNodeGroupList(self):
"""Get a list of node groups.
......@@ -489,6 +489,9 @@ _NODE_CALLS = [
("checkdict", None, "What to verify"),
("cluster_name", None, "Cluster name"),
("all_hvparams", None, "Dictionary mapping hypervisor names to hvparams"),
("node_groups", None, "node names mapped to their group uuids"),
("groups_cfg", None,
"a dictionary mapping group uuids to their configuration"),
], None, None, "Request verification of given parameters"),
("node_volumes", MULTI, None, constants.RPC_TMO_FAST, [], None, None,
"Gets all volumes on node(s)"),
......@@ -611,6 +614,9 @@ CALLS = {
("checkdict", None, "What to verify"),
("cluster_name", None, "Cluster name"),
("hvparams", None, "Dictionary mapping hypervisor names to hvparams"),
("node_groups", None, "node names mapped to their group uuids"),
("groups_cfg", None,
"a dictionary mapping group uuids to their configuration"),
], None, None, "Request verification of given parameters"),
"RpcClientConfig": _Prepare([
......@@ -774,15 +774,19 @@ class NodeRequestHandler(http.server.HttpServerHandler):
"""Run a verify sequence on this node.
(what, cluster_name, hvparams) = params
return backend.VerifyNode(what, cluster_name, hvparams)
(what, cluster_name, hvparams, node_groups, groups_cfg) = params
return backend.VerifyNode(what, cluster_name, hvparams,
node_groups, groups_cfg)
def perspective_node_verify_light(cls, params):
"""Run a light verify sequence on this node.
This call is meant to perform a less strict verification of the node in
certain situations. Right now, it is invoked only when a node is just about
to be added to a cluster, and even then, it performs the same checks as
# So far it's the same as the normal node_verify
return cls.perspective_node_verify(params)
......@@ -123,7 +123,8 @@ class SshRunner:
self.ipv6 = ipv6
def _BuildSshOptions(self, batch, ask_key, use_cluster_key,
strict_host_check, private_key=None, quiet=True):
strict_host_check, private_key=None, quiet=True,
"""Builds a list with needed SSH options.
@param batch: same as ssh's batch option
......@@ -134,6 +135,7 @@ class SshRunner:
@param strict_host_check: this makes the host key checking strict
@param private_key: use this private key instead of the default
@param quiet: whether to enable -q to ssh
@param port: the SSH port to use, or None to use the default
@rtype: list
@return: the list of options ready to use in L{utils.process.RunCmd}
......@@ -156,6 +158,9 @@ class SshRunner:
if private_key:
options.append("-i%s" % private_key)
if port:
options.append("-oPort=%d" % port)
# TODO: Too many boolean options, maybe convert them to more descriptive
# constants.
......@@ -190,7 +195,7 @@ class SshRunner:
def BuildCmd(self, hostname, user, command, batch=True, ask_key=False,
tty=False, use_cluster_key=True, strict_host_check=True,
private_key=None, quiet=True):
private_key=None, quiet=True, port=None):
"""Build an ssh command to execute a command on a remote node.
@param hostname: the target host, string
......@@ -205,6 +210,7 @@ class SshRunner:
@param strict_host_check: whether to check the host's SSH key at all
@param private_key: use this private key instead of the default
@param quiet: whether to enable -q to ssh
@param port: the SSH port on which the node's daemon is running
@return: the ssh call to run 'command' on the remote host.
......@@ -212,7 +218,7 @@ class SshRunner:
argv = [constants.SSH]
argv.extend(self._BuildSshOptions(batch, ask_key, use_cluster_key,
strict_host_check, private_key,
quiet=quiet, port=port))
if tty:
argv.extend(["-t", "-t"])
......@@ -277,7 +283,7 @@ class SshRunner:
return not result.failed
def VerifyNodeHostname(self, node):
def VerifyNodeHostname(self, node, ssh_port):
"""Verify hostname consistency via SSH.
This functions connects via ssh to a node and compares the hostname
......@@ -290,6 +296,7 @@ class SshRunner:
@param node: nodename of a host to check; can be short or
full qualified hostname
@param ssh_port: the port of an SSH daemon running on the node
@return: (success, detail), where:
- success: True/False
......@@ -301,7 +308,8 @@ class SshRunner:
" echo \"$GANETI_HOSTNAME\";"
retval = self.Run(node, constants.SSH_LOGIN_USER, cmd, quiet=False)
retval = self.Run(node, constants.SSH_LOGIN_USER, cmd,
quiet=False, port=ssh_port)
if retval.failed:
msg = "ssh problem"
......@@ -89,7 +89,7 @@ class TestNodeVerify(testutils.GanetiTestCase):
# this a real functional test, but requires localhost to be reachable
local_data = (netutils.Hostname.GetSysName(),
result = backend.VerifyNode({constants.NV_MASTERIP: local_data}, None, {})
result = backend.VerifyNode({constants.NV_MASTERIP: local_data}, None, {}, {}, {})
self.failUnless(constants.NV_MASTERIP in result,
"Master IP data not returned")
self.failUnless(result[constants.NV_MASTERIP], "Cannot reach localhost")
......@@ -100,7 +100,7 @@ class TestNodeVerify(testutils.GanetiTestCase):
bad_data = ("", "")
# we just test that whatever TcpPing returns, VerifyNode returns too
netutils.TcpPing = lambda a, b, source=None: False
result = backend.VerifyNode({constants.NV_MASTERIP: bad_data}, None, {})
result = backend.VerifyNode({constants.NV_MASTERIP: bad_data}, None, {}, {}, {})
self.failUnless(constants.NV_MASTERIP in result,
"Master IP data not returned")
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment