Commit b989b9d9 authored by Ken Wehr, committed by Guido Trotter

Adding '--no-ssh-init' option to 'gnt-cluster init'.

Allows the initialization of a cluster without the creation or distribution
of SSH key pairs. Includes changes for LeaveCluster and RPC.

Signed-off-by: Ken Wehr <ksw@google.com>
Signed-off-by: Guido Trotter <ultrotter@google.com>
Reviewed-by: Guido Trotter <ultrotter@google.com>

parent 95b487bb
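With this option the cluster comes up without Ganeti creating or distributing
any SSH key pairs, leaving key management entirely to the administrator. A
usage sketch (the cluster name is a placeholder):

    gnt-cluster init --no-ssh-init cluster1.example.com

The choice is recorded in the cluster configuration, so node add, node remove,
and cluster destroy skip their SSH key handling as well, as the diff below
shows.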
@@ -85,6 +85,7 @@ Details
 - Reimplemented bash completion script to be more complete
 - Improved burnin
 - Added option to specify maximum timeout on instance shutdown
+- Added ``--no-ssh-init`` option to ``gnt-cluster init``
 
 Version 2.0.4
@@ -593,7 +593,7 @@ class NodeHttpServer(http.server.HttpServer):
     """Cleanup after leaving a cluster.
 
     """
-    return backend.LeaveCluster()
+    return backend.LeaveCluster(params[0])
 
   @staticmethod
   def perspective_node_volumes(params):
@@ -351,7 +351,7 @@ def AddNode(dsa, dsapub, rsa, rsapub, sshkey, sshpub):
   utils.RunCmd([constants.SSH_INITD_SCRIPT, "restart"])
 
 
-def LeaveCluster():
+def LeaveCluster(modify_ssh_setup):
   """Cleans up and remove the current node.
 
   This function cleans up and prepares the current node to be removed
@@ -361,19 +361,22 @@ def LeaveCluster():
   L{errors.QuitGanetiException} which is used as a special case to
   shutdown the node daemon.
 
+  @param modify_ssh_setup: boolean
+
   """
   _CleanDirectory(constants.DATA_DIR)
   JobQueuePurge()
 
-  try:
-    priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)
-
-    utils.RemoveAuthorizedKey(auth_keys, utils.ReadFile(pub_key))
-
-    utils.RemoveFile(priv_key)
-    utils.RemoveFile(pub_key)
-  except errors.OpExecError:
-    logging.exception("Error while processing ssh files")
+  if modify_ssh_setup:
+    try:
+      priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)
+
+      utils.RemoveAuthorizedKey(auth_keys, utils.ReadFile(pub_key))
+
+      utils.RemoveFile(priv_key)
+      utils.RemoveFile(pub_key)
+    except errors.OpExecError:
+      logging.exception("Error while processing ssh files")
 
   try:
     utils.RemoveFile(constants.HMAC_CLUSTER_KEY)
@@ -138,7 +138,7 @@ def InitCluster(cluster_name, mac_prefix,
                 master_netdev, file_storage_dir, candidate_pool_size,
                 secondary_ip=None, vg_name=None, beparams=None,
                 nicparams=None, hvparams=None, enabled_hypervisors=None,
-                modify_etc_hosts=True):
+                modify_etc_hosts=True, modify_ssh_setup=True):
   """Initialise the cluster.
 
   @type candidate_pool_size: int
@@ -250,7 +250,8 @@ def InitCluster(cluster_name, mac_prefix,
   if modify_etc_hosts:
     utils.AddHostToEtcHosts(hostname.name)
 
-  _InitSSHSetup()
+  if modify_ssh_setup:
+    _InitSSHSetup()
 
   now = time.time()
@@ -273,6 +274,7 @@ def InitCluster(cluster_name, mac_prefix,
     hvparams=hvparams,
     candidate_pool_size=candidate_pool_size,
     modify_etc_hosts=modify_etc_hosts,
+    modify_ssh_setup=modify_ssh_setup,
     ctime=now,
     mtime=now,
     uuid=utils.NewUUID(),
@@ -335,11 +337,13 @@ def FinalizeClusterDestroy(master):
   begun in cmdlib.LUDestroyOpcode.
 
   """
+  cfg = config.ConfigWriter()
+  modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
   result = rpc.RpcRunner.call_node_stop_master(master, True)
   msg = result.fail_msg
   if msg:
     logging.warning("Could not disable the master role: %s" % msg)
-  result = rpc.RpcRunner.call_node_leave_cluster(master)
+  result = rpc.RpcRunner.call_node_leave_cluster(master, modify_ssh_setup)
   msg = result.fail_msg
   if msg:
     logging.warning("Could not shutdown the node daemon and cleanup"
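Note the design here: the flag is given once, at init time, and persisted in
the cluster object; later operations such as node removal or cluster destroy
read the stored value back (cfg.GetClusterInfo().modify_ssh_setup) instead of
taking their own option. A toy end-to-end sketch of that flow (a plain dict
stands in for the config, print calls stand in for the real work):

    def init_cluster(modify_ssh_setup=True):
      # Decision recorded once, at "gnt-cluster init" time.
      if modify_ssh_setup:
        print("initializing SSH keys")        # stands in for _InitSSHSetup()
      return {"modify_ssh_setup": modify_ssh_setup}

    def remove_node(cluster_config, node):
      # Later operations consult the stored choice instead of a new flag.
      if cluster_config["modify_ssh_setup"]:
        print("removing SSH keys from %s" % node)   # LeaveCluster's cleanup
      else:
        print("leaving SSH configuration on %s untouched" % node)

    cfg = init_cluster(modify_ssh_setup=False)  # gnt-cluster init --no-ssh-init
    remove_node(cfg, "node2.example.com")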
@@ -84,6 +84,7 @@ __all__ = [
   "NOIPCHECK_OPT",
   "NOLVM_STORAGE_OPT",
   "NOMODIFY_ETCHOSTS_OPT",
+  "NOMODIFY_SSH_SETUP_OPT",
   "NONICS_OPT",
   "NONLIVE_OPT",
   "NONPLUS1_OPT",
@@ -794,6 +795,10 @@ NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                    help="Don't modify /etc/hosts",
                                    action="store_false", default=True)
 
+NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
+                                    help="Don't initialize SSH keys",
+                                    action="store_false", default=True)
+
 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                              help="Enable parseable error messages",
                              action="store_true", default=False)
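The new option reuses the store_false idiom of --no-etc-hosts: the destination
defaults to True and the flag flips it to False, so code paths can simply test
opts.modify_ssh_setup. A minimal stand-alone sketch with the stdlib optparse
module (Ganeti's cli_option is optparse-based; this is illustrative, not the
project's code):

    import optparse

    parser = optparse.OptionParser()
    # Mirrors NOMODIFY_SSH_SETUP_OPT above: a default-True destination
    # that the flag flips to False.
    parser.add_option("--no-ssh-init", dest="modify_ssh_setup",
                      help="Don't initialize SSH keys",
                      action="store_false", default=True)

    opts, _ = parser.parse_args(["--no-ssh-init"])
    assert opts.modify_ssh_setup is False  # flag given: skip SSH setup

    opts, _ = parser.parse_args([])
    assert opts.modify_ssh_setup is True   # default: keys are managed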
@@ -867,6 +867,7 @@ class LUDestroyCluster(LogicalUnit):
 
     """
     master = self.cfg.GetMasterNode()
+    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
 
     # Run post hooks on master node before it's removed
     hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
@@ -877,9 +878,12 @@ class LUDestroyCluster(LogicalUnit):
     result = self.rpc.call_node_stop_master(master, False)
     result.Raise("Could not disable the master role")
 
-    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
-    utils.CreateBackup(priv_key)
-    utils.CreateBackup(pub_key)
+    if modify_ssh_setup:
+      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
+      utils.CreateBackup(priv_key)
+      utils.CreateBackup(pub_key)
 
     return master
@@ -2340,6 +2344,8 @@ class LURemoveNode(LogicalUnit):
     logging.info("Stopping the node daemon and removing configs from node %s",
                  node.name)
 
+    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
+
     # Promote nodes to master candidate as needed
     _AdjustCandidatePool(self, exceptions=[node.name])
     self.context.RemoveNode(node.name)
@@ -2351,7 +2357,7 @@ class LURemoveNode(LogicalUnit):
     except:
       self.LogWarning("Errors occurred running hooks on %s" % node.name)
 
-    result = self.rpc.call_node_leave_cluster(node.name)
+    result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
     msg = result.fail_msg
     if msg:
       self.LogWarning("Errors encountered on the remote node while leaving"
@@ -2900,20 +2906,21 @@ class LUAddNode(LogicalUnit):
                                  (constants.PROTOCOL_VERSION, result.payload))
 
     # setup ssh on node
-    logging.info("Copy ssh key to node %s", node)
-    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
-    keyarray = []
-    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
-                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
-                priv_key, pub_key]
-
-    for i in keyfiles:
-      keyarray.append(utils.ReadFile(i))
-
-    result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
-                                    keyarray[2],
-                                    keyarray[3], keyarray[4], keyarray[5])
-    result.Raise("Cannot transfer ssh keys to the new node")
+    if self.cfg.GetClusterInfo().modify_ssh_setup:
+      logging.info("Copy ssh key to node %s", node)
+      priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
+      keyarray = []
+      keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
+                  constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
+                  priv_key, pub_key]
+
+      for i in keyfiles:
+        keyarray.append(utils.ReadFile(i))
+
+      result = self.rpc.call_node_add(node, keyarray[0], keyarray[1],
+                                      keyarray[2], keyarray[3], keyarray[4],
+                                      keyarray[5])
+      result.Raise("Cannot transfer ssh keys to the new node")
 
     # Add node to our /etc/hosts, and add key to known_hosts
     if self.cfg.GetClusterInfo().modify_etc_hosts:
@@ -825,6 +825,7 @@ class Cluster(TaggableObject):
     "nicparams",
     "candidate_pool_size",
     "modify_etc_hosts",
+    "modify_ssh_setup",
     ] + _TIMESTAMPS + _UUID
 
   def UpgradeConfig(self):
@@ -850,6 +851,9 @@ class Cluster(TaggableObject):
     if self.modify_etc_hosts is None:
       self.modify_etc_hosts = True
 
+    if self.modify_ssh_setup is None:
+      self.modify_ssh_setup = True
+
     # default_bridge is no longer used it 2.1. The slot is left there to
     # support auto-upgrading, but will be removed in 2.2
     if self.default_bridge is not None:
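The UpgradeConfig change is what keeps configurations from older clusters
loadable: a config serialized before this commit has no modify_ssh_setup slot,
it deserializes as None, and the upgrade hook restores the previous behaviour
by defaulting it to True. A simplified, self-contained illustration of the
pattern (not Ganeti's actual serialization code):

    class Cluster(object):
      """Stripped-down stand-in for objects.Cluster, for illustration."""

      def __init__(self, modify_ssh_setup=None):
        # A config written before this change has no such slot, so it
        # loads as None.
        self.modify_ssh_setup = modify_ssh_setup

      def UpgradeConfig(self):
        # Older clusters always managed SSH keys, so None upgrades to True.
        if self.modify_ssh_setup is None:
          self.modify_ssh_setup = True

    old = Cluster()     # deserialized from a pre-2.1 configuration
    old.UpgradeConfig()
    assert old.modify_ssh_setup is True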
@@ -1044,7 +1044,7 @@ class RpcRunner(object):
     return self._SingleNodeCall(node, "export_remove", [export])
 
   @classmethod
-  def call_node_leave_cluster(cls, node):
+  def call_node_leave_cluster(cls, node, modify_ssh_setup):
     """Requests a node to clean the cluster information it has.
 
     This will remove the configuration information from the ganeti data
@@ -1053,7 +1053,8 @@ class RpcRunner(object):
     This is a single-node call.
 
     """
-    return cls._StaticSingleNodeCall(node, "node_leave_cluster", [])
+    return cls._StaticSingleNodeCall(node, "node_leave_cluster",
+                                     [modify_ssh_setup])
 
   def call_node_volumes(self, node_list):
     """Gets all volumes on node(s).
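Both sides of the wire have to agree on the parameter list: the runner ships
[modify_ssh_setup] as the call body, and the node daemon's handler (the
perspective change at the top of this diff) reads it back as params[0]. A
schematic of that contract with the transport replaced by a local dispatch
(names simplified; not the real wire code):

    def call_node_leave_cluster(node, modify_ssh_setup):
      # Caller side: arguments travel as a positional list, as in
      # _StaticSingleNodeCall above.
      return _fake_rpc(node, "node_leave_cluster", [modify_ssh_setup])

    def perspective_node_leave_cluster(params):
      # Daemon side: unpack in the order the caller sent them.
      modify_ssh_setup = params[0]
      return "LeaveCluster(modify_ssh_setup=%s)" % modify_ssh_setup

    def _fake_rpc(node, procedure, args):
      # Stand-in for the HTTP round trip: dispatch locally by name.
      handlers = {"node_leave_cluster": perspective_node_leave_cluster}
      return handlers[procedure](args)

    print(call_node_leave_cluster("node1.example.com", False))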
@@ -221,6 +221,10 @@
           <sbr>
           <arg>--no-lvm-storage</arg>
           <sbr>
+          <arg>--no-etc-hosts</arg>
+          <sbr>
+          <arg>--no-ssh-init</arg>
+          <sbr>
           <arg>--file-storage-dir <replaceable>dir</replaceable></arg>
           <sbr>
           <arg>--enabled-hypervisors <replaceable>hypervisors</replaceable></arg>
@@ -296,13 +300,23 @@
       </para>
 
       <para>
-        The <option>--no-lvm-storage</option> allows you to initialize the
-        cluster without lvm support. This means that only instances using
+        The <option>--no-lvm-storage</option> option allows you to initialize
+        the cluster without lvm support. This means that only instances using
         files as storage backend will be possible to create. Once the cluster
         is initialized you can change this setup with the
         <command>modify</command> command.
       </para>
 
+      <para>
+        The <option>--no-etc-hosts</option> option allows you to initialize the
+        cluster without modifying the <filename>/etc/hosts</filename> file.
+      </para>
+
+      <para>
+        The <option>--no-ssh-init</option> option allows you to initialize the
+        cluster without creating or distributing SSH key pairs.
+      </para>
+
       <para>
         The <option>--file-storage-dir</option> option allows you
         set the directory to use for storing the instance disk
@@ -99,6 +99,7 @@ def InitCluster(opts, args):
                         nicparams=nicparams,
                         candidate_pool_size=opts.candidate_pool_size,
                         modify_etc_hosts=opts.modify_etc_hosts,
+                        modify_ssh_setup=opts.modify_ssh_setup,
                         )
   op = opcodes.OpPostInitCluster()
   SubmitOpCode(op)
@@ -600,7 +601,8 @@ commands = {
     InitCluster, [ArgHost(min=1, max=1)],
     [BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, GLOBAL_FILEDIR_OPT,
      HVLIST_OPT, MAC_PREFIX_OPT, MASTER_NETDEV_OPT, NIC_PARAMS_OPT,
-     NOLVM_STORAGE_OPT, NOMODIFY_ETCHOSTS_OPT, SECONDARY_IP_OPT, VG_NAME_OPT],
+     NOLVM_STORAGE_OPT, NOMODIFY_ETCHOSTS_OPT, NOMODIFY_SSH_SETUP_OPT,
+     SECONDARY_IP_OPT, VG_NAME_OPT],
     "[opts...] <cluster_name>", "Initialises a new cluster configuration"),
   'destroy': (
     DestroyCluster, ARGS_NONE, [YES_DOIT_OPT],