#!/usr/bin/python
#

# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Tool to upgrade the configuration file.

This code handles only the types supported by simplejson. As an
example, 'set' is a 'list'.
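
Typical invocations (the available options are defined in main() below):

  cfgupgrade --verbose --dry-run   # preview the conversion, write nothing
  cfgupgrade --downgrade           # go back to the previous stable version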

"""


import os
import os.path
import sys
import optparse
import logging
import time
from cStringIO import StringIO

from ganeti import constants
from ganeti import serializer
from ganeti import utils
from ganeti import cli
from ganeti import bootstrap
from ganeti import config
from ganeti import netutils
from ganeti import pathutils

from ganeti.utils import version


options = None
args = None


#: Target major version we will upgrade to
TARGET_MAJOR = 2
#: Target minor version we will upgrade to
TARGET_MINOR = 12
#: Target major version for downgrade
DOWNGRADE_MAJOR = 2
#: Target minor version for downgrade
DOWNGRADE_MINOR = 11

# map of legacy device types
# (mapping differing old LD_* constants to new DT_* constants)
DEV_TYPE_OLD_NEW = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
# (mapping differing new DT_* constants to old LD_* constants)
DEV_TYPE_NEW_OLD = dict((v, k) for k, v in DEV_TYPE_OLD_NEW.items())


class Error(Exception):
  """Generic exception"""
  pass


def SetupLogging():
  """Configures the logging module.

  """
  formatter = logging.Formatter("%(asctime)s: %(message)s")

  stderr_handler = logging.StreamHandler()
  stderr_handler.setFormatter(formatter)
  if options.debug:
    stderr_handler.setLevel(logging.NOTSET)
  elif options.verbose:
    stderr_handler.setLevel(logging.INFO)
  else:
    stderr_handler.setLevel(logging.WARNING)

  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)
  root_logger.addHandler(stderr_handler)


def CheckHostname(path):
  """Ensures hostname matches ssconf value.

  @param path: Path to ssconf file

  """
  ssconf_master_node = utils.ReadOneLineFile(path)
  hostname = netutils.GetHostname().name

  if ssconf_master_node == hostname:
    return True

  logging.warning("Warning: ssconf says master node is '%s', but this"
                  " machine's name is '%s'; this tool must be run on"
                  " the master node", ssconf_master_node, hostname)
  return False


def _FillIPolicySpecs(default_ipolicy, ipolicy):
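  """Fills in missing minmax spec values from C{default_ipolicy}."""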
  if "minmax" in ipolicy:
    for (key, spec) in ipolicy["minmax"][0].items():
      for (par, val) in default_ipolicy["minmax"][0][key].items():
        if par not in spec:
          spec[par] = val


def UpgradeIPolicy(ipolicy, default_ipolicy, isgroup):
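  """Converts an instance policy to the current format.

  Legacy top-level "min"/"max" entries are folded into the new "minmax"
  list, the "std" spec is dropped for group policies, and missing spec
  values are filled in from C{default_ipolicy}.

  """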
  minmax_keys = ["min", "max"]
  if any((k in ipolicy) for k in minmax_keys):
    minmax = {}
    for key in minmax_keys:
      if key in ipolicy:
        if ipolicy[key]:
          minmax[key] = ipolicy[key]
        del ipolicy[key]
    if minmax:
      ipolicy["minmax"] = [minmax]
  if isgroup and "std" in ipolicy:
    del ipolicy["std"]
  _FillIPolicySpecs(default_ipolicy, ipolicy)


def UpgradeNetworks(config_data):
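  """Ensures the configuration has a (possibly empty) 'networks' entry."""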
  networks = config_data.get("networks", None)
  if not networks:
    config_data["networks"] = {}


def UpgradeCluster(config_data):
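  """Upgrades the cluster section of the configuration.

  Upgrades the cluster-level instance policy and adds default values
  for the iallocator parameters, candidate certificates and instance
  communication network.

  """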
  cluster = config_data.get("cluster", None)
  if cluster is None:
    raise Error("Cannot find cluster")
  ipolicy = cluster.setdefault("ipolicy", None)
  if ipolicy:
    UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False)
  ial_params = cluster.get("default_iallocator_params", None)
  if not ial_params:
    cluster["default_iallocator_params"] = {}
  if not "candidate_certs" in cluster:
    cluster["candidate_certs"] = {}
  cluster["instance_communication_network"] = \
    cluster.get("instance_communication_network", "")


def UpgradeGroups(config_data):
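  """Upgrades the node groups' 'networks' and 'ipolicy' entries."""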
  cl_ipolicy = config_data["cluster"].get("ipolicy")
  for group in config_data["nodegroups"].values():
    networks = group.get("networks", None)
    if not networks:
      group["networks"] = {}
    ipolicy = group.get("ipolicy", None)
    if ipolicy:
      if cl_ipolicy is None:
        raise Error("A group defines an instance policy but there is no"
                    " instance policy at cluster level")
      UpgradeIPolicy(ipolicy, cl_ipolicy, True)


def GetExclusiveStorageValue(config_data):
  """Return a conservative value of the exclusive_storage flag.

  Return C{True} if the cluster or at least a nodegroup have the flag set.

  """
  ret = False
  cluster = config_data["cluster"]
  ndparams = cluster.get("ndparams")
  if ndparams is not None and ndparams.get("exclusive_storage"):
    ret = True
  for group in config_data["nodegroups"].values():
    ndparams = group.get("ndparams")
    if ndparams is not None and ndparams.get("exclusive_storage"):
      ret = True
  return ret


def RemovePhysicalId(disk):
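  """Recursively drops the 'physical_id' attribute from a disk tree."""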
  if "children" in disk:
    for d in disk["children"]:
      RemovePhysicalId(d)
  if "physical_id" in disk:
    del disk["physical_id"]


def ChangeDiskDevType(disk, dev_type_map):
  """Replaces disk's dev_type attributes according to the given map.

  This can be used for both upgrading and downgrading the disks.
  """
  if disk["dev_type"] in dev_type_map:
    disk["dev_type"] = dev_type_map[disk["dev_type"]]
  if "children" in disk:
    for child in disk["children"]:
      ChangeDiskDevType(child, dev_type_map)


def UpgradeDiskDevType(disk):
  """Upgrades the disks' device type."""
  ChangeDiskDevType(disk, DEV_TYPE_OLD_NEW)


def UpgradeInstances(config_data):
  """Upgrades the instances' configuration."""

  network2uuid = dict((n["name"], n["uuid"])
                      for n in config_data["networks"].values())
  if "instances" not in config_data:
    raise Error("Can't find the 'instances' key in the configuration!")

  missing_spindles = False
  for instance, iobj in config_data["instances"].items():
    for nic in iobj["nics"]:
      name = nic.get("network", None)
      if name:
        uuid = network2uuid.get(name, None)
        if uuid:
          print("NIC with network name %s found."
                " Substituting with uuid %s." % (name, uuid))
          nic["network"] = uuid

    if "disks" not in iobj:
      raise Error("Instance '%s' doesn't have a disks entry?!" % instance)
    disks = iobj["disks"]
    for idx, dobj in enumerate(disks):
      RemovePhysicalId(dobj)

      expected = "disk/%s" % idx
      current = dobj.get("iv_name", "")
      if current != expected:
        logging.warning("Updating iv_name for instance %s/disk %s"
                        " from '%s' to '%s'",
                        instance, idx, current, expected)
        dobj["iv_name"] = expected

      if "dev_type" in dobj:
        UpgradeDiskDevType(dobj)

      if not "spindles" in dobj:
        missing_spindles = True

  if GetExclusiveStorageValue(config_data) and missing_spindles:
    # We cannot be sure that the instances that are missing spindles have
    # exclusive storage enabled (the check would be more complicated), so we
    # give a noncommittal message
    logging.warning("Some instance disks may need their spindles parameter"
                    " updated; you can check by running"
                    " 'gnt-cluster verify', and fix any problem with"
                    " 'gnt-cluster repair-disk-sizes'")


def UpgradeRapiUsers():
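  """Renames the pre-2.4 RAPI users file to its current location.

  A symlink is left behind at the old path for backwards compatibility.

  """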
  if (os.path.isfile(options.RAPI_USERS_FILE_PRE24) and
      not os.path.islink(options.RAPI_USERS_FILE_PRE24)):
    if os.path.exists(options.RAPI_USERS_FILE):
      raise Error("Found pre-2.4 RAPI users file at %s, but another file"
                  " already exists at %s" %
                  (options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE))
    logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      utils.RenameFile(options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE,
                       mkdir=True, mkdir_mode=0750)

  # Create a symlink for RAPI users file
  if (not (os.path.islink(options.RAPI_USERS_FILE_PRE24) or
           os.path.isfile(options.RAPI_USERS_FILE_PRE24)) and
      os.path.isfile(options.RAPI_USERS_FILE)):
    logging.info("Creating symlink from %s to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      os.symlink(options.RAPI_USERS_FILE, options.RAPI_USERS_FILE_PRE24)


def UpgradeWatcher():
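  """Removes a stale watcher state file, if one is present."""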
  # Remove old watcher state file if it exists
  if os.path.exists(options.WATCHER_STATEFILE):
    logging.info("Removing watcher state file %s", options.WATCHER_STATEFILE)
    if not options.dry_run:
      utils.RemoveFile(options.WATCHER_STATEFILE)


def UpgradeFileStoragePaths(config_data):
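  """Writes the configured file storage directories to the whitelist file.

  Nothing is done if the whitelist file already exists.

  """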
  # Write file storage paths
  if not os.path.exists(options.FILE_STORAGE_PATHS_FILE):
    cluster = config_data["cluster"]
    file_storage_dir = cluster.get("file_storage_dir")
    shared_file_storage_dir = cluster.get("shared_file_storage_dir")
    del cluster

    logging.info("Ganeti 2.7 and later only allow whitelisted directories"
                 " for file storage; writing existing configuration values"
                 " into '%s'",
                 options.FILE_STORAGE_PATHS_FILE)

    if file_storage_dir:
      logging.info("File storage directory: %s", file_storage_dir)
    if shared_file_storage_dir:
      logging.info("Shared file storage directory: %s",
                   shared_file_storage_dir)

    buf = StringIO()
    buf.write("# List automatically generated from configuration by\n")
    buf.write("# cfgupgrade at %s\n" % time.asctime())
    if file_storage_dir:
      buf.write("%s\n" % file_storage_dir)
    if shared_file_storage_dir:
      buf.write("%s\n" % shared_file_storage_dir)
    utils.WriteFile(file_name=options.FILE_STORAGE_PATHS_FILE,
                    data=buf.getvalue(),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)


def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
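  """Maps a node's old key to its new one, defaulting to the old key.

  If C{old_key} is unknown, the node is assumed to be up-to-date already
  and the key is returned unchanged.

  """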
  if old_key not in nodes_by_old_key:
    logging.warning("Can't find node '%s' in configuration, assuming that it's"
                    " already up-to-date", old_key)
    return old_key
  return nodes_by_old_key[old_key][new_key_field]


def ChangeNodeIndices(config_data, old_key_field, new_key_field):
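  """Re-keys the nodes dictionary and updates all node references.

  The cluster's master node, the instances' primary nodes and the DRBD
  disks' logical IDs are mapped from C{old_key_field} to
  C{new_key_field}.

  """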
  def ChangeDiskNodeIndices(disk):
    # Note: 'drbd8' is a legacy device type from pre 2.9 and needs to be
    # considered when up/downgrading from/to any versions touching 2.9 on the
    # way.
    drbd_disk_types = set(["drbd8"]) | constants.DTS_DRBD
    if disk["dev_type"] in drbd_disk_types:
      for i in range(0, 2):
        disk["logical_id"][i] = GetNewNodeIndex(nodes_by_old_key,
                                                disk["logical_id"][i],
                                                new_key_field)
    if "children" in disk:
      for child in disk["children"]:
        ChangeDiskNodeIndices(child)

  nodes_by_old_key = {}
  nodes_by_new_key = {}
  for (_, node) in config_data["nodes"].items():
    nodes_by_old_key[node[old_key_field]] = node
    nodes_by_new_key[node[new_key_field]] = node

  config_data["nodes"] = nodes_by_new_key

  cluster = config_data["cluster"]
  cluster["master_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           cluster["master_node"],
                                           new_key_field)

  for inst in config_data["instances"].values():
    inst["primary_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           inst["primary_node"],
                                           new_key_field)
    for disk in inst["disks"]:
      ChangeDiskNodeIndices(disk)


def ChangeInstanceIndices(config_data, old_key_field, new_key_field):
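  """Re-keys the instances dictionary by C{new_key_field}."""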
  insts_by_old_key = {}
  insts_by_new_key = {}
  for (_, inst) in config_data["instances"].items():
    insts_by_old_key[inst[old_key_field]] = inst
    insts_by_new_key[inst[new_key_field]] = inst

  config_data["instances"] = insts_by_new_key


def UpgradeNodeIndices(config_data):
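  """Re-keys the nodes dictionary by UUID instead of by name."""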
  ChangeNodeIndices(config_data, "name", "uuid")


def UpgradeInstanceIndices(config_data):
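  """Re-keys the instances dictionary by UUID instead of by name."""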
  ChangeInstanceIndices(config_data, "name", "uuid")


def UpgradeAll(config_data):
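  """Runs all upgrade steps and bumps the configuration version."""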
  config_data["version"] = version.BuildVersion(TARGET_MAJOR, TARGET_MINOR, 0)
  UpgradeRapiUsers()
  UpgradeWatcher()
  UpgradeFileStoragePaths(config_data)
  UpgradeNetworks(config_data)
  UpgradeCluster(config_data)
  UpgradeGroups(config_data)
  UpgradeInstances(config_data)
  UpgradeNodeIndices(config_data)
  UpgradeInstanceIndices(config_data)


# DOWNGRADE ------------------------------------------------------------


def DowngradeCluster(config_data):
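  """Removes cluster keys unknown to the downgrade target version."""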
  cluster = config_data.get("cluster", None)
  if not cluster:
    raise Error("Cannot find the 'cluster' key in the configuration")

  if "osparams_private_cluster" in cluster:
    del cluster["osparams_private_cluster"]

  if "instance_communication_network" in cluster:
    del cluster["instance_communication_network"]


def DowngradeInstances(config_data):
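  """Removes instance keys unknown to the downgrade target version."""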
  instances = config_data.get("instances", None)
  if instances is None:
    raise Error("Cannot find the 'instances' key in the configuration")

  for (_, iobj) in instances.items():
    if "osparams_private" in iobj:
      del iobj["osparams_private"]


def DowngradeAll(config_data):
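  """Runs all downgrade steps and lowers the configuration version."""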
  # Any code specific to a particular version should be labeled that way, so
  # it can be removed when updating to the next version.
  config_data["version"] = version.BuildVersion(DOWNGRADE_MAJOR,
                                                DOWNGRADE_MINOR, 0)
  DowngradeCluster(config_data)
  DowngradeInstances(config_data)


def main():
  """Main program.

  """
  global options, args # pylint: disable=W0603

  # Option parsing
  parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
  parser.add_option("--dry-run", dest="dry_run",
                    action="store_true",
                    help="Try to do the conversion, but don't write"
                         " output file")
  parser.add_option(cli.FORCE_OPT)
  parser.add_option(cli.DEBUG_OPT)
  parser.add_option(cli.VERBOSE_OPT)
  parser.add_option("--ignore-hostname", dest="ignore_hostname",
                    action="store_true", default=False,
                    help="Don't abort if hostname doesn't match")
  parser.add_option("--path", help="Convert configuration in this"
                    " directory instead of '%s'" % pathutils.DATA_DIR,
                    default=pathutils.DATA_DIR, dest="data_dir")
  parser.add_option("--confdir",
                    help=("Use this directory instead of '%s'" %
                          pathutils.CONF_DIR),
                    default=pathutils.CONF_DIR, dest="conf_dir")
  parser.add_option("--no-verify",
                    help="Do not verify configuration after upgrade",
                    action="store_true", dest="no_verify", default=False)
  parser.add_option("--downgrade",
                    help="Downgrade to the previous stable version",
                    action="store_true", dest="downgrade", default=False)
  (options, args) = parser.parse_args()

  # We need to keep filenames locally because they might be renamed between
  # versions.
  options.data_dir = os.path.abspath(options.data_dir)
  options.CONFIG_DATA_PATH = options.data_dir + "/config.data"
  options.SERVER_PEM_PATH = options.data_dir + "/server.pem"
  options.CLIENT_PEM_PATH = options.data_dir + "/client.pem"
  options.KNOWN_HOSTS_PATH = options.data_dir + "/known_hosts"
  options.RAPI_CERT_FILE = options.data_dir + "/rapi.pem"
  options.SPICE_CERT_FILE = options.data_dir + "/spice.pem"
  options.SPICE_CACERT_FILE = options.data_dir + "/spice-ca.pem"
  options.RAPI_USERS_FILE = options.data_dir + "/rapi/users"
  options.RAPI_USERS_FILE_PRE24 = options.data_dir + "/rapi_users"
  options.CONFD_HMAC_KEY = options.data_dir + "/hmac.key"
  options.CDS_FILE = options.data_dir + "/cluster-domain-secret"
  options.SSCONF_MASTER_NODE = options.data_dir + "/ssconf_master_node"
  options.WATCHER_STATEFILE = options.data_dir + "/watcher.data"
  options.FILE_STORAGE_PATHS_FILE = options.conf_dir + "/file-storage-paths"

  SetupLogging()

  # Option checking
  if args:
    raise Error("No arguments expected")
  if options.downgrade and not options.no_verify:
    options.no_verify = True

  # Check master name
  if not (CheckHostname(options.SSCONF_MASTER_NODE) or options.ignore_hostname):
    logging.error("Aborting due to hostname mismatch")
    sys.exit(constants.EXIT_FAILURE)

  if not options.force:
    if options.downgrade:
      usertext = ("The configuration is going to be DOWNGRADED to version"
                  " %s.%s. Some configuration data might be removed if they"
                  " don't fit in the old format. Please make sure you have"
                  " read the upgrade notes (available in the UPGRADE file"
                  " and included in other documentation formats) to"
                  " understand what they are. Continue with *DOWNGRADING*"
                  " the configuration?" %
                  (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
    else:
      usertext = ("Please make sure you have read the upgrade notes for"
                  " Ganeti %s (available in the UPGRADE file and included"
                  " in other documentation formats). Continue with upgrading"
                  " configuration?" % constants.RELEASE_VERSION)
    if not cli.AskUser(usertext):
      sys.exit(constants.EXIT_FAILURE)

  # Check whether it's a Ganeti configuration directory
  if not (os.path.isfile(options.CONFIG_DATA_PATH) and
          os.path.isfile(options.SERVER_PEM_PATH) and
          os.path.isfile(options.KNOWN_HOSTS_PATH)):
    raise Error(("%s does not seem to be a Ganeti configuration"
                 " directory") % options.data_dir)

  if not os.path.isdir(options.conf_dir):
    raise Error("Not a directory: %s" % options.conf_dir)

  config_data = serializer.LoadJson(utils.ReadFile(options.CONFIG_DATA_PATH))

  try:
    config_version = config_data["version"]
  except KeyError:
    raise Error("Unable to determine configuration version")

  (config_major, config_minor, config_revision) = \
    version.SplitVersion(config_version)

  logging.info("Found configuration version %s (%d.%d.%d)",
               config_version, config_major, config_minor, config_revision)

  if "config_version" in config_data["cluster"]:
    raise Error("Inconsistent configuration: found config_version in"
                " configuration file")

  # Downgrade to the previous stable version
  if options.downgrade:
    if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or
            (config_major == DOWNGRADE_MAJOR and
             config_minor == DOWNGRADE_MINOR)):
      raise Error("Downgrade supported only from the latest version (%s.%s),"
                  " found %s (%s.%s.%s) instead" %
                  (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
                   config_minor, config_revision))
    DowngradeAll(config_data)

  # Upgrade from 2.{0..11} to 2.12
  elif config_major == 2 and config_minor in range(0, 12):
    if config_revision != 0:
      logging.warning("Config revision is %s, not 0", config_revision)
    UpgradeAll(config_data)

  elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
    logging.info("No changes necessary")

  else:
    raise Error("Configuration version %d.%d.%d not supported by this tool" %
                (config_major, config_minor, config_revision))

  try:
    logging.info("Writing configuration file to %s", options.CONFIG_DATA_PATH)
    utils.WriteFile(file_name=options.CONFIG_DATA_PATH,
                    data=serializer.DumpJson(config_data),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)

    if not options.dry_run:
      bootstrap.GenerateClusterCrypto(
        False, False, False, False, False,
        nodecert_file=options.SERVER_PEM_PATH,
        rapicert_file=options.RAPI_CERT_FILE,
        spicecert_file=options.SPICE_CERT_FILE,
        spicecacert_file=options.SPICE_CACERT_FILE,
        hmackey_file=options.CONFD_HMAC_KEY,
        cds_file=options.CDS_FILE)

  except Exception:
    logging.critical("Writing configuration failed. It is probably in an"
                     " inconsistent state and needs manual intervention.")
    raise

  # test loading the config file
  all_ok = True
  if not (options.dry_run or options.no_verify):
    logging.info("Testing the new config file...")
    cfg = config.ConfigWriter(cfg_file=options.CONFIG_DATA_PATH,
                              accept_foreign=options.ignore_hostname,
                              offline=True)
    # if we reached this, it's all fine
    vrfy = cfg.VerifyConfig()
    if vrfy:
      logging.error("Errors after conversion:")
      for item in vrfy:
        logging.error(" - %s", item)
      all_ok = False
    else:
      logging.info("File loaded successfully after upgrading")
    del cfg

  if options.downgrade:
    action = "downgraded"
    out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
  else:
    action = "upgraded"
    out_ver = constants.RELEASE_VERSION
  if all_ok:
    cli.ToStderr("Configuration successfully %s to version %s.",
                 action, out_ver)
  else:
    cli.ToStderr("Configuration %s to version %s, but there are errors."
                 "\nPlease review the file.", action, out_ver)


if __name__ == "__main__":
  main()