#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Functions to bootstrap a new cluster.

"""

import os
import os.path
import re
import logging

from ganeti import rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf


def _InitSSHSetup():
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root and adds the public key to
  root's list of authorized keys.

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  for name in priv_key, pub_key:
    if os.path.exists(name):
      utils.CreateBackup(name)
    utils.RemoveFile(name)

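  # non-interactive generation: "-q" keeps ssh-keygen quiet and '-N ""'
  # requests an empty passphrase, so the key can be used unattended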
  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

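  # a DSA public key is far below 8 kB, so the bounded read is only a
  # safety net against a corrupt or oversized file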
  f = open(pub_key, 'r')
  try:
    utils.AddAuthorizedKey(auth_keys, f.read(8192))
  finally:
    f.close()


def _GenerateSelfSignedSslCert(file_name, validity=(365 * 5)):
  """Generates a self-signed SSL certificate.

  @type file_name: str
  @param file_name: Path to output file
  @type validity: int
  @param validity: Validity for certificate in days

  """
  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                         "-days", str(validity), "-nodes", "-x509",
                         "-keyout", file_name, "-out", file_name, "-batch"])
  if result.failed:
    raise errors.OpExecError("Could not generate SSL certificate, command"
                             " %s had exitcode %s and error message %s" %
                             (result.cmd, result.exit_code, result.output))

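  # -keyout and -out above point at the same path, so the private key
  # and the certificate end up in a single PEM file, which must
  # therefore be readable by its owner only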
  os.chmod(file_name, 0400)


def _InitGanetiServerSetup():
  """Setup the necessary configuration for the initial node daemon.

  This generates the node daemon and RAPI SSL certificates (the former
  unconditionally, the latter only if missing) and restarts the node
  daemon.

  """
  _GenerateSelfSignedSslCert(constants.SSL_CERT_FILE)

  # Don't overwrite existing file
  if not os.path.exists(constants.RAPI_CERT_FILE):
    _GenerateSelfSignedSslCert(constants.RAPI_CERT_FILE)

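  # a restart is needed for ganeti-noded to pick up the new certificates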
  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])

  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))


def InitCluster(cluster_name, mac_prefix, def_bridge,
                master_netdev, file_storage_dir, candidate_pool_size,
                secondary_ip=None, vg_name=None, beparams=None, hvparams=None,
                enabled_hypervisors=None, default_hypervisor=None):
  """Initialise the cluster.

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size

  """
  # TODO: complete the docstring
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised")

  hostname = utils.HostInfo()

  if hostname.ip.startswith("127."):
    raise errors.OpPrereqError("This host's IP resolves to the loopback"
                               " range (%s). Please fix DNS or %s." %
                               (hostname.ip, constants.ETC_HOSTS))

  if not utils.OwnIpAddress(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this IP address does not"
                               " belong to this host."
                               " Aborting." % hostname.ip)

  clustername = utils.HostInfo(cluster_name)

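  # anything answering on the cluster IP means an old or foreign cluster
  # is still using it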
  if utils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT,
                   timeout=5):
    raise errors.OpPrereqError("Cluster IP already active. Aborting.")

  if secondary_ip:
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    if (secondary_ip != hostname.ip and
        not utils.OwnIpAddress(secondary_ip)):
      raise errors.OpPrereqError("You gave %s as secondary IP,"
                                 " but it does not belong to this host." %
                                 secondary_ip)
  else:
    secondary_ip = hostname.ip

  if vg_name is not None:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
                                 " you are not using lvm" % vgstatus)

  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory you passed is"
                               " not an absolute path.")

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" %
                                 (file_storage_dir, err))

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir)

  if not re.match("^[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid MAC prefix given '%s'" % mac_prefix)

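  # "ip link show" exits with an error if the given device does not exist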
  result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
  if result.failed:
    raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                               (master_netdev,
                                result.output.strip()))

  if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
          os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
    raise errors.OpPrereqError("Init.d script '%s' missing or not"
                               " executable." % constants.NODE_INITD_SCRIPT)

  utils.CheckBEParams(beparams)

  # set up the node certificates and restart the node daemon
  _InitGanetiServerSetup()

  # set up ssh config and /etc/hosts
  f = open(constants.SSH_HOST_RSA_PUB, 'r')
  try:
    sshline = f.read()
  finally:
    f.close()
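  # the public key file has the form "<type> <base64 key> [comment]";
  # only the base64 body goes into the cluster configuration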
  sshkey = sshline.split(" ")[1]

  utils.AddHostToEtcHosts(hostname.name)
  _InitSSHSetup()

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    default_bridge=def_bridge,
    tcpudp_port_pool=set(),
    master_node=hostname.name,
    master_ip=clustername.ip,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    default_hypervisor=default_hypervisor,
    beparams={constants.BEGR_DEFAULT: beparams},
    hvparams=hvparams,
    candidate_pool_size=candidate_pool_size,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1,
                                    master_candidate=True,
                                    offline=False,
                                    )

  sscfg = InitConfig(constants.CONFIG_VERSION,
                     cluster_config, master_node_config)
  ssh.WriteKnownHostsFile(sscfg, constants.SSH_KNOWN_HOSTS_FILE)
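  # cfg.Update writes the configuration out; as noted in MasterFailover
  # below, this also regenerates the ssconf files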
  cfg = config.ConfigWriter()
  cfg.Update(cfg.GetClusterInfo())

  # start the master ip
  # TODO: Review rpc call from bootstrap
  rpc.RpcRunner.call_node_start_master(hostname.name, True)


def InitConfig(version, cluster_config, master_node_config,
               cfg_file=constants.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: configuration version
  @type cluster_config: L{objects.Cluster}
  @param cluster_config: cluster configuration
  @type master_node_config: L{objects.Node}
  @param master_node_config: master node configuration
  @type cfg_file: string
  @param cfg_file: configuration file path

  @rtype: L{ssconf.SimpleConfigWriter}
  @returns: initialized config instance

  """
  nodes = {
    master_node_config.name: master_node_config,
    }

  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodes=nodes,
                                   instances={},
                                   serial_no=1)
  cfg = ssconf.SimpleConfigWriter.FromDict(config_data.ToDict(), cfg_file)
  cfg.Save()

  return cfg


def FinalizeClusterDestroy(master):
  """Execute the last steps of cluster destroy.

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  result = rpc.RpcRunner.call_node_stop_master(master, True)
  if result.failed or not result.data:
    logging.warning("Could not disable the master role")
  result = rpc.RpcRunner.call_node_leave_cluster(master)
  if result.failed or not result.data:
    logging.warning("Could not shut down the node daemon and clean up"
                    " the node")


def SetupNodeDaemon(cluster_name, node, ssh_key_check):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param cluster_name: the cluster name
  @param node: the name of the new node
  @param ssh_key_check: whether to do a strict key check

  """
  sshrunner = ssh.SshRunner(cluster_name)
  gntpem = utils.ReadFile(constants.SSL_CERT_FILE)
  # in the base64 pem encoding, neither '!' nor '.' are valid chars,
  # so we use this to detect an invalid certificate; as long as the
  # cert doesn't contain this, the here-document will be correctly
  # parsed by the shell sequence below
  if re.search(r'^!EOF\.', gntpem, re.MULTILINE):
    raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
  if not gntpem.endswith("\n"):
    raise errors.OpExecError("PEM must end with newline")

  # upload the node certificate over ssh and restart the node daemon,
  # thereby starting ganeti-noded on the new node
  # note that all the below variables are sanitized at this point,
  # either by being constants or by the checks above
  mycommand = ("umask 077 && "
               "cat > '%s' << '!EOF.' && \n"
               "%s!EOF.\n%s restart" %
               (constants.SSL_CERT_FILE, gntpem,
                constants.NODE_INITD_SCRIPT))
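  # once interpolated, the command looks roughly like this (paths are
  # illustrative, the real ones come from constants):
  #   umask 077 && cat > '/var/lib/ganeti/server.pem' << '!EOF.' &&
  #   <PEM data>
  #   !EOF.
  #   /etc/init.d/ganeti restart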

  result = sshrunner.Run(node, 'root', mycommand, batch=False,
                         ask_key=ssh_key_check,
                         use_cluster_key=False,
                         strict_host_check=ssh_key_check)
  if result.failed:
    raise errors.OpExecError("Remote command on node %s, error: %s,"
                             " output: %s" %
                             (node, result.fail_reason, result.output))


def MasterFailover():
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and this node to become the
  new master.

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_list = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ('\n'.join(mc_no_master)))

  vote_list = GatherMasterVotes(node_list)
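  # vote_list is ordered by (votes, name), descending; entry [0] is the
  # cluster-wide consensus on the current master, with None standing
  # for nodes that could not be queried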

  if vote_list:
    voted_master = vote_list[0][0]
    if voted_master is None:
      raise errors.OpPrereqError("Cluster is inconsistent, most nodes did not"
                                 " respond.")
    elif voted_master != old_master:
      raise errors.OpPrereqError("This node has a wrong configuration: it"
                                 " believes the master is %s, but the other"
                                 " nodes voted for %s. Please resync the"
                                 " configuration of this node." %
                                 (old_master, voted_master))
  # end checks

  rcode = 0

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  result = rpc.RpcRunner.call_node_stop_master(old_master, True)
  if result.failed or not result.data:
    logging.error("Could not disable the master role on the old master"
                  " %s, please disable manually", old_master)

  # Here we have a phase where no master should be running

  # instantiate a real config writer, as we now know we have the
  # configuration data
  cfg = config.ConfigWriter()

  cluster_info = cfg.GetClusterInfo()
  cluster_info.master_node = new_master
  # this will also regenerate the ssconf files, since we updated the
  # cluster info
  cfg.Update(cluster_info)

  result = rpc.RpcRunner.call_node_start_master(new_master, True)
  if result.failed or not result.data:
    logging.error("Could not start the master role on the new master"
                  " %s, please check", new_master)
    rcode = 1

  return rcode


def GatherMasterVotes(node_list):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  We remove ourselves from the list since, bugs aside, we use the same
  source of configuration information for both backend and bootstrap
  and will therefore always vote for ourselves.

  @type node_list: list
  @param node_list: the list of nodes to query for master info; the current
      node will be removed if it is in the list
  @rtype: list
  @return: list of (node, votes)
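
  Example (illustrative): if three nodes answer with "node1.example.com"
  as their master and one node does not respond, the result is
  [("node1.example.com", 3), (None, 1)].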

  """
  myself = utils.HostInfo().name
  try:
    node_list.remove(myself)
  except ValueError:
    pass
  if not node_list:
    # no nodes left (possibly after removing myself)
    return []
  results = rpc.RpcRunner.call_master_info(node_list)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_list))]
  votes = {}
  for node in results:
    nres = results[node]
    data = nres.data
    if nres.failed or not isinstance(data, (tuple, list)) or len(data) < 3:
      # here the rpc layer should have already logged errors
      if None not in votes:
        votes[None] = 0
      votes[None] += 1
      continue
    master_node = data[2]
    if master_node not in votes:
      votes[master_node] = 0
    votes[master_node] += 1

  vote_list = votes.items()
  # sort first on number of votes then on name; in a tie None sorts
  # after real node names (Python 2 orders None before any string),
  # which matters when half of the nodes fail to respond and the other
  # half all vote for the same master
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)
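  # e.g. {"node2": 2, "node1": 2, None: 1} yields
  # [("node2", 2), ("node1", 2), (None, 1)]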

  return vote_list