#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Functions to bootstrap a new cluster.

"""

import os
import os.path
import sha
import re
import logging
import tempfile

from ganeti import rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf


def _InitSSHSetup():
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root and adds its public key to
  root's list of authorized keys.

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

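  # Back up and remove any pre-existing keypair for root before
  # generating a fresh one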
  for name in priv_key, pub_key:
    if os.path.exists(name):
      utils.CreateBackup(name)
    utils.RemoveFile(name)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  f = open(pub_key, 'r')
  try:
    utils.AddAuthorizedKey(auth_keys, f.read(8192))
  finally:
    f.close()


def _GenerateSelfSignedSslCert(file_name, validity=(365 * 5)):
  """Generates a self-signed SSL certificate.

  @type file_name: str
  @param file_name: Path to output file
  @type validity: int
  @param validity: Validity for certificate in days

  """
  (fd, tmp_file_name) = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    # Set permissions before writing key
    os.chmod(tmp_file_name, 0600)

    result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                           "-days", str(validity), "-nodes", "-x509",
                           "-keyout", tmp_file_name, "-out", tmp_file_name,
                           "-batch"])
    if result.failed:
      raise errors.OpExecError("Could not generate SSL certificate, command"
                               " %s had exitcode %s and error message %s" %
                               (result.cmd, result.exit_code, result.output))

    # Make read-only
    os.chmod(tmp_file_name, 0400)

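    # The temporary file was created in the target directory, so this
    # rename atomically puts the finished certificate in place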
    os.rename(tmp_file_name, file_name)
  finally:
    utils.RemoveFile(tmp_file_name)


def _InitGanetiServerSetup():
  """Setup the necessary configuration for the initial node daemon.

  This generates the cluster SSL certificate (and the RAPI certificate,
  if it does not exist yet) and restarts the node daemon.

  """
  _GenerateSelfSignedSslCert(constants.SSL_CERT_FILE)

  # Don't overwrite existing file
  if not os.path.exists(constants.RAPI_CERT_FILE):
    _GenerateSelfSignedSslCert(constants.RAPI_CERT_FILE)

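  # Restart the node daemon so it picks up the newly generated certificates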
  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])

  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))


def InitCluster(cluster_name, mac_prefix, def_bridge,
                master_netdev, file_storage_dir, candidate_pool_size,
                secondary_ip=None, vg_name=None, beparams=None, hvparams=None,
                enabled_hypervisors=None, default_hypervisor=None):
  """Initialise the cluster.

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size

  """
  # TODO: complete the docstring
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised")

  hostname = utils.HostInfo()

  if hostname.ip.startswith("127."):
    raise errors.OpPrereqError("This host's IP resolves to the private"
                               " range (%s). Please fix DNS or %s." %
                               (hostname.ip, constants.ETC_HOSTS))

  if not utils.OwnIpAddress(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host."
                               " Aborting." % hostname.ip)

  clustername = utils.HostInfo(cluster_name)

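  # The cluster name must resolve to an IP address that is not in use yet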
  if utils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT,
                   timeout=5):
    raise errors.OpPrereqError("Cluster IP already active. Aborting.")

  if secondary_ip:
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given")
    if (secondary_ip != hostname.ip and
        not utils.OwnIpAddress(secondary_ip)):
      raise errors.OpPrereqError("You gave %s as secondary IP,"
                                 " but it does not belong to this host." %
                                 secondary_ip)
  else:
    secondary_ip = hostname.ip

  if vg_name is not None:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
                                 " you are not using lvm" % vgstatus)

  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory you passed is"
                               " not an absolute path.")

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" %
                                 (file_storage_dir, err))

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir)

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix)

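  # Check that the requested master network device exists on this node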
  result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
  if result.failed:
    raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                               (master_netdev,
                                result.output.strip()))

  if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
          os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
    raise errors.OpPrereqError("Init.d script '%s' missing or not"
                               " executable." % constants.NODE_INITD_SCRIPT)

  utils.CheckBEParams(beparams)

  # set up the SSL certificates for inter-node communication
  _InitGanetiServerSetup()

  # set up ssh config and /etc/hosts
  f = open(constants.SSH_HOST_RSA_PUB, 'r')
  try:
    sshline = f.read()
  finally:
    f.close()
  sshkey = sshline.split(" ")[1]

  utils.AddHostToEtcHosts(hostname.name)
  _InitSSHSetup()

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    default_bridge=def_bridge,
    tcpudp_port_pool=set(),
    master_node=hostname.name,
    master_ip=clustername.ip,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    default_hypervisor=default_hypervisor,
    beparams={constants.BEGR_DEFAULT: beparams},
    hvparams=hvparams,
    candidate_pool_size=candidate_pool_size,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1,
                                    master_candidate=True,
                                    offline=False,
                                    )

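  # Write the initial cluster configuration and the cluster-wide known_hosts
  # file; updating via ConfigWriter also regenerates the ssconf files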
  sscfg = InitConfig(constants.CONFIG_VERSION,
                     cluster_config, master_node_config)
  ssh.WriteKnownHostsFile(sscfg, constants.SSH_KNOWN_HOSTS_FILE)
  cfg = config.ConfigWriter()
  cfg.Update(cfg.GetClusterInfo())

  # start the master ip
  # TODO: Review rpc call from bootstrap
  rpc.RpcRunner.call_node_start_master(hostname.name, True)


def InitConfig(version, cluster_config, master_node_config,
               cfg_file=constants.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: configuration version
  @type cluster_config: L{objects.Cluster}
  @param cluster_config: cluster configuration
  @type master_node_config: L{objects.Node}
  @param master_node_config: master node configuration
  @type cfg_file: string
  @param cfg_file: configuration file path

  @rtype: L{ssconf.SimpleConfigWriter}
  @returns: initialized config instance

  """
  nodes = {
    master_node_config.name: master_node_config,
    }

  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodes=nodes,
                                   instances={},
                                   serial_no=1)
  cfg = ssconf.SimpleConfigWriter.FromDict(config_data.ToDict(), cfg_file)
  cfg.Save()

  return cfg


def FinalizeClusterDestroy(master):
  """Execute the last steps of cluster destroy

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  result = rpc.RpcRunner.call_node_stop_master(master, True)
  if result.failed or not result.data:
    logging.warning("Could not disable the master role")
  result = rpc.RpcRunner.call_node_leave_cluster(master)
  if result.failed or not result.data:
    logging.warning("Could not shutdown the node daemon and cleanup the node")


def SetupNodeDaemon(cluster_name, node, ssh_key_check):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param cluster_name: the cluster name
  @param node: the name of the new node
  @param ssh_key_check: whether to do a strict key check

  """
  sshrunner = ssh.SshRunner(cluster_name)
  gntpem = utils.ReadFile(constants.SSL_CERT_FILE)
  # in the base64 pem encoding, neither '!' nor '.' are valid chars,
  # so we use this to detect an invalid certificate; as long as the
  # cert doesn't contain this, the here-document will be correctly
  # parsed by the shell sequence below
  if re.search('^!EOF\.', gntpem, re.MULTILINE):
    raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
  if not gntpem.endswith("\n"):
    raise errors.OpExecError("PEM must end with newline")

  # set up inter-node password and certificate and restarts the node daemon
  # and then connect with ssh to set password and start ganeti-noded
  # note that all the below variables are sanitized at this point,
  # either by being constants or by the checks above
  mycommand = ("umask 077 && "
               "cat > '%s' << '!EOF.' && \n"
               "%s!EOF.\n%s restart" %
               (constants.SSL_CERT_FILE, gntpem,
                constants.NODE_INITD_SCRIPT))

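  # Run the command on the new node over ssh; host key checking is
  # controlled by the ssh_key_check argument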
  result = sshrunner.Run(node, 'root', mycommand, batch=False,
                         ask_key=ssh_key_check,
                         use_cluster_key=False,
                         strict_host_check=ssh_key_check)
  if result.failed:
    raise errors.OpExecError("Remote command on node %s, error: %s,"
                             " output: %s" %
                             (node, result.fail_reason, result.output))


def MasterFailover():
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  new master.

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_list = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This commands must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ('\n'.join(mc_no_master)))

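  # Check that the rest of the cluster agrees that old_master is the
  # current master before taking over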
  vote_list = GatherMasterVotes(node_list)

  if vote_list:
    voted_master = vote_list[0][0]
    if voted_master is None:
      raise errors.OpPrereqError("Cluster is inconsistent, most nodes did not"
                                 " respond.")
    elif voted_master != old_master:
      raise errors.OpPrereqError("I have wrong configuration, I believe the"
                                 " master is %s but the other nodes voted for"
                                 " %s. Please resync the configuration of"
                                 " this node." % (old_master, voted_master))
  # end checks

  rcode = 0

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  result = rpc.RpcRunner.call_node_stop_master(old_master, True)
  if result.failed or not result.data:
    logging.error("Could not disable the master role on the old master"
                 " %s, please disable manually", old_master)

  # Here we have a phase where no master should be running

  # instantiate a real config writer, as we now know we have the
  # configuration data
  cfg = config.ConfigWriter()

  cluster_info = cfg.GetClusterInfo()
  cluster_info.master_node = new_master
  # this will also regenerate the ssconf files, since we updated the
  # cluster info
  cfg.Update(cluster_info)

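  # Activate the master role on the new master node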
  result = rpc.RpcRunner.call_node_start_master(new_master, True)
  if result.failed or not result.data:
    logging.error("Could not start the master role on the new master"
                  " %s, please check", new_master)
    rcode = 1

  return rcode


def GatherMasterVotes(node_list):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  We remove ourselves from the list since we know that (bugs aside) we
  use the same source for configuration information for both backend
  and bootstrap, so we'll always vote for ourselves.

  @type node_list: list
  @param node_list: the list of nodes to query for master info; the current
      node will be removed if it is in the list
  @rtype: list
  @return: list of (node, votes)

  """
  myself = utils.HostInfo().name
  try:
    node_list.remove(myself)
  except ValueError:
    pass
  if not node_list:
    # no nodes left (possibly after removing myself)
    return []
  results = rpc.RpcRunner.call_master_info(node_list)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_list))]
  votes = {}
  for node in results:
    nres = results[node]
    data = nres.data
    if nres.failed or not isinstance(data, (tuple, list)) or len(data) < 3:
      # here the rpc layer should have already logged errors
      if None not in votes:
        votes[None] = 0
      votes[None] += 1
      continue
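    # data[2] is the name of the node that the queried node considers
    # to be the master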
    master_node = data[2]
    if master_node not in votes:
      votes[master_node] = 0
    votes[master_node] += 1

  vote_list = [v for v in votes.items()]
  # sort first on number of votes then on name, since we want None
  # sorted later if we have the half of the nodes not responding, and
  # half voting all for the same master
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)

  return vote_list