#!/usr/bin/python
#

# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Tool to upgrade the configuration file.

24
25
This code handles only the types supported by simplejson. As an
example, 'set' is a 'list'.
26
27
28
29
30
31
32
33

"""


import os
import os.path
import sys
import optparse
34
import logging
35
36
import time
from cStringIO import StringIO
37

38
39
from ganeti import constants
from ganeti import serializer
40
from ganeti import utils
41
from ganeti import cli
42
from ganeti import bootstrap
43
from ganeti import config
44
from ganeti import netutils
45
from ganeti import pathutils
46
47


48
49
options = None
args = None
50

Iustin Pop's avatar
Iustin Pop committed
51

52
53
54
#: Target major version we will upgrade to
TARGET_MAJOR = 2
#: Target minor version we will upgrade to
Michele Tartara's avatar
Michele Tartara committed
55
TARGET_MINOR = 9
56
57
58
#: Target major version for downgrade
DOWNGRADE_MAJOR = 2
#: Target minor version for downgrade
Michele Tartara's avatar
Michele Tartara committed
59
DOWNGRADE_MINOR = 8
60

61
62
63
64
# map of legacy device types
# (mapping differing old LD_* constants to new DT_* constants)
DEV_TYPE_OLD_NEW = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
# (mapping differing new DT_* constants to old LD_* constants)
Helga Velroyen's avatar
Helga Velroyen committed
65
DEV_TYPE_NEW_OLD = dict((v, k) for k, v in DEV_TYPE_OLD_NEW.items())
66

67

68
69
70
class Error(Exception):
  """Generic exception"""
  pass
71
72


73
74
75
76
77
78
79
80
81
82
83
84
85
def SetupLogging():
  """Configures the logging module.

  """
  formatter = logging.Formatter("%(asctime)s: %(message)s")

  stderr_handler = logging.StreamHandler()
  stderr_handler.setFormatter(formatter)
  if options.debug:
    stderr_handler.setLevel(logging.NOTSET)
  elif options.verbose:
    stderr_handler.setLevel(logging.INFO)
  else:
86
    stderr_handler.setLevel(logging.WARNING)
87
88
89
90
91
92

  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)
  root_logger.addHandler(stderr_handler)


93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
def CheckHostname(path):
  """Ensures hostname matches ssconf value.

  @param path: Path to ssconf file

  """
  ssconf_master_node = utils.ReadOneLineFile(path)
  hostname = netutils.GetHostname().name

  if ssconf_master_node == hostname:
    return True

  logging.warning("Warning: ssconf says master node is '%s', but this"
                  " machine's name is '%s'; this tool must be run on"
                  " the master node", ssconf_master_node, hostname)
  return False

110

111
112
def _FillIPolicySpecs(default_ipolicy, ipolicy):
  if "minmax" in ipolicy:
113
114
    for (key, spec) in ipolicy["minmax"][0].items():
      for (par, val) in default_ipolicy["minmax"][0][key].items():
115
116
117
118
119
        if par not in spec:
          spec[par] = val


def UpgradeIPolicy(ipolicy, default_ipolicy, isgroup):
120
121
122
123
124
  minmax_keys = ["min", "max"]
  if any((k in ipolicy) for k in minmax_keys):
    minmax = {}
    for key in minmax_keys:
      if key in ipolicy:
125
126
        if ipolicy[key]:
          minmax[key] = ipolicy[key]
127
        del ipolicy[key]
128
    if minmax:
129
      ipolicy["minmax"] = [minmax]
130
131
132
  if isgroup and "std" in ipolicy:
    del ipolicy["std"]
  _FillIPolicySpecs(default_ipolicy, ipolicy)
133
134


Dimitris Aragiorgis's avatar
Dimitris Aragiorgis committed
135
136
137
138
139
140
def UpgradeNetworks(config_data):
  networks = config_data.get("networks", None)
  if not networks:
    config_data["networks"] = {}


141
142
143
144
def UpgradeCluster(config_data):
  cluster = config_data.get("cluster", None)
  if cluster is None:
    raise Error("Cannot find cluster")
145
  ipolicy = cluster.setdefault("ipolicy", None)
146
  if ipolicy:
147
    UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False)
148
149


Dimitris Aragiorgis's avatar
Dimitris Aragiorgis committed
150
def UpgradeGroups(config_data):
151
  cl_ipolicy = config_data["cluster"].get("ipolicy")
Dimitris Aragiorgis's avatar
Dimitris Aragiorgis committed
152
153
154
155
  for group in config_data["nodegroups"].values():
    networks = group.get("networks", None)
    if not networks:
      group["networks"] = {}
156
157
    ipolicy = group.get("ipolicy", None)
    if ipolicy:
158
159
160
161
      if cl_ipolicy is None:
        raise Error("A group defines an instance policy but there is no"
                    " instance policy at cluster level")
      UpgradeIPolicy(ipolicy, cl_ipolicy, True)
Dimitris Aragiorgis's avatar
Dimitris Aragiorgis committed
162

163

164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
def GetExclusiveStorageValue(config_data):
  """Return a conservative value of the exclusive_storage flag.

  Return C{True} if the cluster or at least a nodegroup have the flag set.

  """
  ret = False
  cluster = config_data["cluster"]
  ndparams = cluster.get("ndparams")
  if ndparams is not None and ndparams.get("exclusive_storage"):
    ret = True
  for group in config_data["nodegroups"].values():
    ndparams = group.get("ndparams")
    if ndparams is not None and ndparams.get("exclusive_storage"):
      ret = True
  return ret


182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
def ChangeDiskDevType(disk, dev_type_map):
  """Replaces disk's dev_type attributes according to the given map.

  This can be used for both, up or downgrading the disks.
  """
  if disk["dev_type"] in dev_type_map:
    disk["dev_type"] = dev_type_map[disk["dev_type"]]
  if "children" in disk:
    for child in disk["children"]:
      ChangeDiskDevType(child, dev_type_map)


def UpgradeDiskDevType(disk):
  """Upgrades the disks' device type."""
  ChangeDiskDevType(disk, DEV_TYPE_OLD_NEW)


199
def UpgradeInstances(config_data):
200
201
  """Upgrades the instances' configuration."""

202
203
  network2uuid = dict((n["name"], n["uuid"])
                      for n in config_data["networks"].values())
Bernardo Dal Seno's avatar
Bernardo Dal Seno committed
204
205
206
  if "instances" not in config_data:
    raise Error("Can't find the 'instances' key in the configuration!")

207
  missing_spindles = False
Bernardo Dal Seno's avatar
Bernardo Dal Seno committed
208
209
  for instance, iobj in config_data["instances"].items():
    for nic in iobj["nics"]:
210
211
212
213
214
215
216
217
      name = nic.get("network", None)
      if name:
        uuid = network2uuid.get(name, None)
        if uuid:
          print("NIC with network name %s found."
                " Substituting with uuid %s." % (name, uuid))
          nic["network"] = uuid

Bernardo Dal Seno's avatar
Bernardo Dal Seno committed
218
219
220
221
222
223
224
225
226
227
228
    if "disks" not in iobj:
      raise Error("Instance '%s' doesn't have a disks entry?!" % instance)
    disks = iobj["disks"]
    for idx, dobj in enumerate(disks):
      expected = "disk/%s" % idx
      current = dobj.get("iv_name", "")
      if current != expected:
        logging.warning("Updating iv_name for instance %s/disk %s"
                        " from '%s' to '%s'",
                        instance, idx, current, expected)
        dobj["iv_name"] = expected
229
230
231
232

      if "dev_type" in dobj:
        UpgradeDiskDevType(dobj)

233
234
235
236
237
238
239
240
241
242
243
      if not "spindles" in dobj:
        missing_spindles = True

  if GetExclusiveStorageValue(config_data) and missing_spindles:
    # We cannot be sure that the instances that are missing spindles have
    # exclusive storage enabled (the check would be more complicated), so we
    # give a noncommittal message
    logging.warning("Some instance disks could be needing to update the"
                    " spindles parameter; you can check by running"
                    " 'gnt-cluster verify', and fix any problem with"
                    " 'gnt-cluster repair-disk-sizes'")
Bernardo Dal Seno's avatar
Bernardo Dal Seno committed
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309


def UpgradeRapiUsers():
  if (os.path.isfile(options.RAPI_USERS_FILE_PRE24) and
      not os.path.islink(options.RAPI_USERS_FILE_PRE24)):
    if os.path.exists(options.RAPI_USERS_FILE):
      raise Error("Found pre-2.4 RAPI users file at %s, but another file"
                  " already exists at %s" %
                  (options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE))
    logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      utils.RenameFile(options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE,
                       mkdir=True, mkdir_mode=0750)

  # Create a symlink for RAPI users file
  if (not (os.path.islink(options.RAPI_USERS_FILE_PRE24) or
           os.path.isfile(options.RAPI_USERS_FILE_PRE24)) and
      os.path.isfile(options.RAPI_USERS_FILE)):
    logging.info("Creating symlink from %s to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      os.symlink(options.RAPI_USERS_FILE, options.RAPI_USERS_FILE_PRE24)


def UpgradeWatcher():
  # Remove old watcher state file if it exists
  if os.path.exists(options.WATCHER_STATEFILE):
    logging.info("Removing watcher state file %s", options.WATCHER_STATEFILE)
    if not options.dry_run:
      utils.RemoveFile(options.WATCHER_STATEFILE)


def UpgradeFileStoragePaths(config_data):
  # Write file storage paths
  if not os.path.exists(options.FILE_STORAGE_PATHS_FILE):
    cluster = config_data["cluster"]
    file_storage_dir = cluster.get("file_storage_dir")
    shared_file_storage_dir = cluster.get("shared_file_storage_dir")
    del cluster

    logging.info("Ganeti 2.7 and later only allow whitelisted directories"
                 " for file storage; writing existing configuration values"
                 " into '%s'",
                 options.FILE_STORAGE_PATHS_FILE)

    if file_storage_dir:
      logging.info("File storage directory: %s", file_storage_dir)
    if shared_file_storage_dir:
      logging.info("Shared file storage directory: %s",
                   shared_file_storage_dir)

    buf = StringIO()
    buf.write("# List automatically generated from configuration by\n")
    buf.write("# cfgupgrade at %s\n" % time.asctime())
    if file_storage_dir:
      buf.write("%s\n" % file_storage_dir)
    if shared_file_storage_dir:
      buf.write("%s\n" % shared_file_storage_dir)
    utils.WriteFile(file_name=options.FILE_STORAGE_PATHS_FILE,
                    data=buf.getvalue(),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)


310
311
312
313
314
315
316
317
318
319
def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
  if old_key not in nodes_by_old_key:
    logging.warning("Can't find node '%s' in configuration, assuming that it's"
                    " already up-to-date", old_key)
    return old_key
  return nodes_by_old_key[old_key][new_key_field]


def ChangeNodeIndices(config_data, old_key_field, new_key_field):
  def ChangeDiskNodeIndices(disk):
320
321
322
323
324
    # Note: 'drbd8' is a legacy device type from pre 2.9 and needs to be
    # considered when up/downgrading from/to any versions touching 2.9 on the
    # way.
    drbd_disk_types = set(["drbd8"]) | constants.DTS_DRBD
    if disk["dev_type"] in drbd_disk_types:
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
      for i in range(0, 2):
        disk["logical_id"][i] = GetNewNodeIndex(nodes_by_old_key,
                                                disk["logical_id"][i],
                                                new_key_field)
    if "children" in disk:
      for child in disk["children"]:
        ChangeDiskNodeIndices(child)

  nodes_by_old_key = {}
  nodes_by_new_key = {}
  for (_, node) in config_data["nodes"].items():
    nodes_by_old_key[node[old_key_field]] = node
    nodes_by_new_key[node[new_key_field]] = node

  config_data["nodes"] = nodes_by_new_key

  cluster = config_data["cluster"]
  cluster["master_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           cluster["master_node"],
                                           new_key_field)

  for inst in config_data["instances"].values():
    inst["primary_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           inst["primary_node"],
                                           new_key_field)
    for disk in inst["disks"]:
      ChangeDiskNodeIndices(disk)


354
355
356
357
358
359
360
361
362
363
def ChangeInstanceIndices(config_data, old_key_field, new_key_field):
  insts_by_old_key = {}
  insts_by_new_key = {}
  for (_, inst) in config_data["instances"].items():
    insts_by_old_key[inst[old_key_field]] = inst
    insts_by_new_key[inst[new_key_field]] = inst

  config_data["instances"] = insts_by_new_key


364
365
366
367
def UpgradeNodeIndices(config_data):
  ChangeNodeIndices(config_data, "name", "uuid")


368
369
370
371
def UpgradeInstanceIndices(config_data):
  ChangeInstanceIndices(config_data, "name", "uuid")


Bernardo Dal Seno's avatar
Bernardo Dal Seno committed
372
373
374
375
376
377
378
def UpgradeAll(config_data):
  config_data["version"] = constants.BuildVersion(TARGET_MAJOR,
                                                  TARGET_MINOR, 0)
  UpgradeRapiUsers()
  UpgradeWatcher()
  UpgradeFileStoragePaths(config_data)
  UpgradeNetworks(config_data)
379
  UpgradeCluster(config_data)
Bernardo Dal Seno's avatar
Bernardo Dal Seno committed
380
381
  UpgradeGroups(config_data)
  UpgradeInstances(config_data)
382
  UpgradeNodeIndices(config_data)
383
  UpgradeInstanceIndices(config_data)
Bernardo Dal Seno's avatar
Bernardo Dal Seno committed
384

385

386
387
388
389
def DowngradeDiskDevType(disk):
  """Downgrades the disks' device type."""
  ChangeDiskDevType(disk, DEV_TYPE_NEW_OLD)

Helga Velroyen's avatar
Helga Velroyen committed
390

391
392
393
394
395
396
397
398
def DowngradeDisks(disks, owner):
  for disk in disks:
    # Remove spindles to downgrade to 2.8
    if "spindles" in disk:
      logging.warning("Removing spindles (value=%s) from disk %s (%s) of"
                      " instance %s",
                      disk["spindles"], disk["iv_name"], disk["uuid"], owner)
      del disk["spindles"]
399
400
    if "dev_type" in disk:
      DowngradeDiskDevType(disk)
401
402
403
404
405
406
407
408
409
410
411


def DowngradeInstances(config_data):
  if "instances" not in config_data:
    raise Error("Cannot find the 'instances' key in the configuration!")
  for (iname, iobj) in config_data["instances"].items():
    if "disks" not in iobj:
      raise Error("Cannot find 'disks' key for instance %s" % iname)
    DowngradeDisks(iobj["disks"], iname)


412
413
414
415
def DowngradeNodeIndices(config_data):
  ChangeNodeIndices(config_data, "uuid", "name")


416
417
418
419
def DowngradeInstanceIndices(config_data):
  ChangeInstanceIndices(config_data, "uuid", "name")


Helga Velroyen's avatar
Helga Velroyen committed
420
421
422
423
424
425
426
427
428
429
430
431
def DowngradeHvparams(config_data):
  """Downgrade the cluster's hypervisor parameters."""
  cluster = config_data["cluster"]
  if "hvparams" in cluster:
    hvparams = cluster["hvparams"]
    xen_params = None
    for xen_variant in [constants.HT_XEN_PVM, constants.HT_XEN_HVM]:
      if xen_variant in hvparams:
        xen_params = hvparams[xen_variant]
        # 'xen_cmd' was introduced in 2.9
        if constants.HV_XEN_CMD in xen_params:
          del xen_params[constants.HV_XEN_CMD]
Helga Velroyen's avatar
Helga Velroyen committed
432
433
434
        # 'vif_script' was introducted in 2.9
        if constants.HV_VIF_SCRIPT in xen_params:
          del xen_params[constants.HV_VIF_SCRIPT]
Helga Velroyen's avatar
Helga Velroyen committed
435
436


437
438
439
def DowngradeAll(config_data):
  # Any code specific to a particular version should be labeled that way, so
  # it can be removed when updating to the next version.
440
441
  config_data["version"] = constants.BuildVersion(DOWNGRADE_MAJOR,
                                                  DOWNGRADE_MINOR, 0)
442
  DowngradeInstances(config_data)
443
  DowngradeNodeIndices(config_data)
444
  DowngradeInstanceIndices(config_data)
Helga Velroyen's avatar
Helga Velroyen committed
445
  DowngradeHvparams(config_data)
446
447


448
449
450
451
def main():
  """Main program.

  """
452
  global options, args # pylint: disable=W0603
453

454
  # Option parsing
455
  parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
456
  parser.add_option("--dry-run", dest="dry_run",
457
                    action="store_true",
458
459
                    help="Try to do the conversion, but don't write"
                         " output file")
460
  parser.add_option(cli.FORCE_OPT)
461
  parser.add_option(cli.DEBUG_OPT)
462
  parser.add_option(cli.VERBOSE_OPT)
463
464
465
  parser.add_option("--ignore-hostname", dest="ignore_hostname",
                    action="store_true", default=False,
                    help="Don't abort if hostname doesn't match")
466
  parser.add_option("--path", help="Convert configuration in this"
467
468
                    " directory instead of '%s'" % pathutils.DATA_DIR,
                    default=pathutils.DATA_DIR, dest="data_dir")
469
470
471
472
  parser.add_option("--confdir",
                    help=("Use this directory instead of '%s'" %
                          pathutils.CONF_DIR),
                    default=pathutils.CONF_DIR, dest="conf_dir")
473
474
475
  parser.add_option("--no-verify",
                    help="Do not verify configuration after upgrade",
                    action="store_true", dest="no_verify", default=False)
476
477
478
  parser.add_option("--downgrade",
                    help="Downgrade to the previous stable version",
                    action="store_true", dest="downgrade", default=False)
479
480
  (options, args) = parser.parse_args()

481
482
  # We need to keep filenames locally because they might be renamed between
  # versions.
483
  options.data_dir = os.path.abspath(options.data_dir)
484
485
486
487
  options.CONFIG_DATA_PATH = options.data_dir + "/config.data"
  options.SERVER_PEM_PATH = options.data_dir + "/server.pem"
  options.KNOWN_HOSTS_PATH = options.data_dir + "/known_hosts"
  options.RAPI_CERT_FILE = options.data_dir + "/rapi.pem"
488
489
  options.SPICE_CERT_FILE = options.data_dir + "/spice.pem"
  options.SPICE_CACERT_FILE = options.data_dir + "/spice-ca.pem"
490
491
  options.RAPI_USERS_FILE = options.data_dir + "/rapi/users"
  options.RAPI_USERS_FILE_PRE24 = options.data_dir + "/rapi_users"
492
  options.CONFD_HMAC_KEY = options.data_dir + "/hmac.key"
493
  options.CDS_FILE = options.data_dir + "/cluster-domain-secret"
494
  options.SSCONF_MASTER_NODE = options.data_dir + "/ssconf_master_node"
495
  options.WATCHER_STATEFILE = options.data_dir + "/watcher.data"
496
  options.FILE_STORAGE_PATHS_FILE = options.conf_dir + "/file-storage-paths"
497

498
499
  SetupLogging()

500
501
  # Option checking
  if args:
502
    raise Error("No arguments expected")
503
504
  if options.downgrade and not options.no_verify:
    options.no_verify = True
505

506
507
508
509
510
  # Check master name
  if not (CheckHostname(options.SSCONF_MASTER_NODE) or options.ignore_hostname):
    logging.error("Aborting due to hostname mismatch")
    sys.exit(constants.EXIT_FAILURE)

511
  if not options.force:
512
513
514
515
516
517
518
519
520
521
522
523
524
    if options.downgrade:
      usertext = ("The configuration is going to be DOWNGRADED to version %s.%s"
                  " Some configuration data might be removed if they don't fit"
                  " in the old format. Please make sure you have read the"
                  " upgrade notes (available in the UPGRADE file and included"
                  " in other documentation formats) to understand what they"
                  " are. Continue with *DOWNGRADING* the configuration?" %
                  (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
    else:
      usertext = ("Please make sure you have read the upgrade notes for"
                  " Ganeti %s (available in the UPGRADE file and included"
                  " in other documentation formats). Continue with upgrading"
                  " configuration?" % constants.RELEASE_VERSION)
525
    if not cli.AskUser(usertext):
526
      sys.exit(constants.EXIT_FAILURE)
527

528
  # Check whether it's a Ganeti configuration directory
529
  if not (os.path.isfile(options.CONFIG_DATA_PATH) and
530
          os.path.isfile(options.SERVER_PEM_PATH) and
531
          os.path.isfile(options.KNOWN_HOSTS_PATH)):
532
    raise Error(("%s does not seem to be a Ganeti configuration"
533
                 " directory") % options.data_dir)
534

535
536
537
  if not os.path.isdir(options.conf_dir):
    raise Error("Not a directory: %s" % options.conf_dir)

538
  config_data = serializer.LoadJson(utils.ReadFile(options.CONFIG_DATA_PATH))
539

540
541
542
543
  try:
    config_version = config_data["version"]
  except KeyError:
    raise Error("Unable to determine configuration version")
544

545
546
  (config_major, config_minor, config_revision) = \
    constants.SplitVersion(config_version)
547

548
549
  logging.info("Found configuration version %s (%d.%d.%d)",
               config_version, config_major, config_minor, config_revision)
550

551
552
553
  if "config_version" in config_data["cluster"]:
    raise Error("Inconsistent configuration: found config_version in"
                " configuration file")
554

555
556
  # Downgrade to the previous stable version
  if options.downgrade:
Michele Tartara's avatar
Michele Tartara committed
557
558
559
    if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or
            (config_major == DOWNGRADE_MAJOR and
             config_minor == DOWNGRADE_MINOR)):
560
561
562
563
564
565
      raise Error("Downgrade supported only from the latest version (%s.%s),"
                  " found %s (%s.%s.%s) instead" %
                  (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
                   config_minor, config_revision))
    DowngradeAll(config_data)

Michele Tartara's avatar
Michele Tartara committed
566
567
  # Upgrade from 2.{0..7} to 2.9
  elif config_major == 2 and config_minor in range(0, 10):
568
    if config_revision != 0:
569
      logging.warning("Config revision is %s, not 0", config_revision)
Bernardo Dal Seno's avatar
Bernardo Dal Seno committed
570
    UpgradeAll(config_data)
571

572
  elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
573
574
575
576
577
    logging.info("No changes necessary")

  else:
    raise Error("Configuration version %d.%d.%d not supported by this tool" %
                (config_major, config_minor, config_revision))
578

579
580
581
582
583
584
585
  try:
    logging.info("Writing configuration file to %s", options.CONFIG_DATA_PATH)
    utils.WriteFile(file_name=options.CONFIG_DATA_PATH,
                    data=serializer.DumpJson(config_data),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)
586
587

    if not options.dry_run:
Iustin Pop's avatar
Iustin Pop committed
588
589
590
591
592
593
594
595
      bootstrap.GenerateClusterCrypto(
        False, False, False, False, False,
        nodecert_file=options.SERVER_PEM_PATH,
        rapicert_file=options.RAPI_CERT_FILE,
        spicecert_file=options.SPICE_CERT_FILE,
        spicecacert_file=options.SPICE_CACERT_FILE,
        hmackey_file=options.CONFD_HMAC_KEY,
        cds_file=options.CDS_FILE)
596

597
  except Exception:
598
    logging.critical("Writing configuration failed. It is probably in an"
599
600
                     " inconsistent state and needs manual intervention.")
    raise
601

602
  # test loading the config file
603
  all_ok = True
604
  if not (options.dry_run or options.no_verify):
605
606
    logging.info("Testing the new config file...")
    cfg = config.ConfigWriter(cfg_file=options.CONFIG_DATA_PATH,
607
                              accept_foreign=options.ignore_hostname,
608
609
610
611
612
613
                              offline=True)
    # if we reached this, it's all fine
    vrfy = cfg.VerifyConfig()
    if vrfy:
      logging.error("Errors after conversion:")
      for item in vrfy:
Iustin Pop's avatar
Iustin Pop committed
614
        logging.error(" - %s", item)
615
616
617
      all_ok = False
    else:
      logging.info("File loaded successfully after upgrading")
618
619
    del cfg

620
621
622
623
624
625
  if options.downgrade:
    action = "downgraded"
    out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
  else:
    action = "upgraded"
    out_ver = constants.RELEASE_VERSION
626
  if all_ok:
627
628
    cli.ToStderr("Configuration successfully %s to version %s.",
                 action, out_ver)
629
  else:
630
631
    cli.ToStderr("Configuration %s to version %s, but there are errors."
                 "\nPlease review the file.", action, out_ver)
632

633
634
635

if __name__ == "__main__":
  main()