#!/usr/bin/python
#

# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Tool to upgrade the configuration file.

This code handles only the types supported by simplejson. As an
example, 'set' is a 'list'.

"""


import os
import os.path
import sys
import optparse
import logging
import time
from cStringIO import StringIO

from ganeti import constants
from ganeti import serializer
from ganeti import utils
from ganeti import cli
from ganeti import bootstrap
from ganeti import config
from ganeti import netutils
from ganeti import pathutils

from ganeti.utils import version

options = None
args = None


#: Target major version we will upgrade to
TARGET_MAJOR = 2
#: Target minor version we will upgrade to
TARGET_MINOR = 12
#: Target major version for downgrade
DOWNGRADE_MAJOR = 2
#: Target minor version for downgrade
DOWNGRADE_MINOR = 11

# map of legacy device types
# (mapping differing old LD_* constants to new DT_* constants)
DEV_TYPE_OLD_NEW = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
# (mapping differing new DT_* constants to old LD_* constants)
DEV_TYPE_NEW_OLD = dict((v, k) for k, v in DEV_TYPE_OLD_NEW.items())


class Error(Exception):
  """Generic exception"""
  pass


def SetupLogging():
  """Configures the logging module.

  """
  formatter = logging.Formatter("%(asctime)s: %(message)s")

  stderr_handler = logging.StreamHandler()
  stderr_handler.setFormatter(formatter)
  if options.debug:
    stderr_handler.setLevel(logging.NOTSET)
  elif options.verbose:
    stderr_handler.setLevel(logging.INFO)
  else:
    stderr_handler.setLevel(logging.WARNING)

  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)
  root_logger.addHandler(stderr_handler)


def CheckHostname(path):
  """Ensures hostname matches ssconf value.

  @param path: Path to ssconf file

  """
  ssconf_master_node = utils.ReadOneLineFile(path)
  hostname = netutils.GetHostname().name

  if ssconf_master_node == hostname:
    return True

  logging.warning("Warning: ssconf says master node is '%s', but this"
                  " machine's name is '%s'; this tool must be run on"
                  " the master node", ssconf_master_node, hostname)
  return False


def _FillIPolicySpecs(default_ipolicy, ipolicy):
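  """Fills missing min/max spec values in an ipolicy from the defaults.

  Any parameter present in the first C{minmax} entry of C{default_ipolicy}
  but missing from the corresponding spec of C{ipolicy} is copied over.

  """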
  if "minmax" in ipolicy:
    for (key, spec) in ipolicy["minmax"][0].items():
      for (par, val) in default_ipolicy["minmax"][0][key].items():
        if par not in spec:
          spec[par] = val


def UpgradeIPolicy(ipolicy, default_ipolicy, isgroup):
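  """Upgrades an instance policy to the current format.

  Legacy top-level "min" and "max" specs are moved into the C{minmax}
  list, group policies drop their C{std} values, and missing spec values
  are filled in from C{default_ipolicy}.

  @param isgroup: whether the policy belongs to a node group

  """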
  minmax_keys = ["min", "max"]
  if any((k in ipolicy) for k in minmax_keys):
    minmax = {}
    for key in minmax_keys:
      if key in ipolicy:
        if ipolicy[key]:
          minmax[key] = ipolicy[key]
        del ipolicy[key]
    if minmax:
      ipolicy["minmax"] = [minmax]
  if isgroup and "std" in ipolicy:
    del ipolicy["std"]
  _FillIPolicySpecs(default_ipolicy, ipolicy)


def UpgradeNetworks(config_data):
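  """Ensures the configuration contains a (possibly empty) networks dict.

  """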
  networks = config_data.get("networks", None)
  if not networks:
    config_data["networks"] = {}


def UpgradeCluster(config_data):
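  """Upgrades the cluster section of the configuration.

  The cluster instance policy is upgraded and the
  C{default_iallocator_params} and C{candidate_certs} entries are
  created if missing.

  """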
  cluster = config_data.get("cluster", None)
  if cluster is None:
    raise Error("Cannot find cluster")
  ipolicy = cluster.setdefault("ipolicy", None)
  if ipolicy:
    UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False)
  ial_params = cluster.get("default_iallocator_params", None)
  if not ial_params:
    cluster["default_iallocator_params"] = {}
  if "candidate_certs" not in cluster:
    cluster["candidate_certs"] = {}


def UpgradeGroups(config_data):
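  """Upgrades the node group configuration.

  Each group gets a C{networks} entry if it lacks one, and group-level
  instance policies are upgraded against the cluster-level policy.

  """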
  cl_ipolicy = config_data["cluster"].get("ipolicy")
  for group in config_data["nodegroups"].values():
    networks = group.get("networks", None)
    if not networks:
      group["networks"] = {}
    ipolicy = group.get("ipolicy", None)
    if ipolicy:
      if cl_ipolicy is None:
        raise Error("A group defines an instance policy but there is no"
                    " instance policy at cluster level")
      UpgradeIPolicy(ipolicy, cl_ipolicy, True)


def GetExclusiveStorageValue(config_data):
  """Return a conservative value of the exclusive_storage flag.

  Return C{True} if the cluster or at least a nodegroup have the flag set.

  """
  ret = False
  cluster = config_data["cluster"]
  ndparams = cluster.get("ndparams")
  if ndparams is not None and ndparams.get("exclusive_storage"):
    ret = True
  for group in config_data["nodegroups"].values():
    ndparams = group.get("ndparams")
    if ndparams is not None and ndparams.get("exclusive_storage"):
      ret = True
  return ret


def RemovePhysicalId(disk):
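  """Recursively removes the legacy C{physical_id} attribute from a disk.

  """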
  if "children" in disk:
    for d in disk["children"]:
      RemovePhysicalId(d)
  if "physical_id" in disk:
    del disk["physical_id"]


def ChangeDiskDevType(disk, dev_type_map):
  """Replaces disk's dev_type attributes according to the given map.

  This can be used both for upgrading and downgrading the disks.
  """
  if disk["dev_type"] in dev_type_map:
    disk["dev_type"] = dev_type_map[disk["dev_type"]]
  if "children" in disk:
    for child in disk["children"]:
      ChangeDiskDevType(child, dev_type_map)


def UpgradeDiskDevType(disk):
  """Upgrades the disks' device type."""
  ChangeDiskDevType(disk, DEV_TYPE_OLD_NEW)


def UpgradeInstances(config_data):
  """Upgrades the instances' configuration."""

  network2uuid = dict((n["name"], n["uuid"])
                      for n in config_data["networks"].values())
  if "instances" not in config_data:
    raise Error("Can't find the 'instances' key in the configuration!")

  missing_spindles = False
  for instance, iobj in config_data["instances"].items():
    for nic in iobj["nics"]:
      name = nic.get("network", None)
      if name:
        uuid = network2uuid.get(name, None)
        if uuid:
          print("NIC with network name %s found."
                " Substituting with uuid %s." % (name, uuid))
          nic["network"] = uuid

    if "disks" not in iobj:
      raise Error("Instance '%s' doesn't have a disks entry?!" % instance)
    disks = iobj["disks"]
    for idx, dobj in enumerate(disks):
      RemovePhysicalId(dobj)

      expected = "disk/%s" % idx
      current = dobj.get("iv_name", "")
      if current != expected:
        logging.warning("Updating iv_name for instance %s/disk %s"
                        " from '%s' to '%s'",
                        instance, idx, current, expected)
        dobj["iv_name"] = expected

      if "dev_type" in dobj:
        UpgradeDiskDevType(dobj)

      if "spindles" not in dobj:
        missing_spindles = True

  if GetExclusiveStorageValue(config_data) and missing_spindles:
    # We cannot be sure that the instances that are missing spindles have
    # exclusive storage enabled (the check would be more complicated), so we
    # give a noncommittal message
    logging.warning("Some instance disks may need their spindles parameter"
                    " updated; you can check by running"
                    " 'gnt-cluster verify', and fix any problem with"
                    " 'gnt-cluster repair-disk-sizes'")


def UpgradeRapiUsers():
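  """Renames the pre-2.4 RAPI users file and creates a compatibility symlink.

  """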
  if (os.path.isfile(options.RAPI_USERS_FILE_PRE24) and
      not os.path.islink(options.RAPI_USERS_FILE_PRE24)):
    if os.path.exists(options.RAPI_USERS_FILE):
      raise Error("Found pre-2.4 RAPI users file at %s, but another file"
                  " already exists at %s" %
                  (options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE))
    logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      utils.RenameFile(options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE,
                       mkdir=True, mkdir_mode=0750)

  # Create a symlink for RAPI users file
  if (not (os.path.islink(options.RAPI_USERS_FILE_PRE24) or
           os.path.isfile(options.RAPI_USERS_FILE_PRE24)) and
      os.path.isfile(options.RAPI_USERS_FILE)):
    logging.info("Creating symlink from %s to %s",
                 options.RAPI_USERS_FILE_PRE24, options.RAPI_USERS_FILE)
    if not options.dry_run:
      os.symlink(options.RAPI_USERS_FILE, options.RAPI_USERS_FILE_PRE24)


def UpgradeWatcher():
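  """Removes a leftover watcher state file, if present.

  """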
  # Remove old watcher state file if it exists
  if os.path.exists(options.WATCHER_STATEFILE):
    logging.info("Removing watcher state file %s", options.WATCHER_STATEFILE)
    if not options.dry_run:
      utils.RemoveFile(options.WATCHER_STATEFILE)


def UpgradeFileStoragePaths(config_data):
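  """Writes the file storage path whitelist used by Ganeti 2.7 and later.

  """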
  # Write file storage paths
  if not os.path.exists(options.FILE_STORAGE_PATHS_FILE):
    cluster = config_data["cluster"]
    file_storage_dir = cluster.get("file_storage_dir")
    shared_file_storage_dir = cluster.get("shared_file_storage_dir")
    del cluster

    logging.info("Ganeti 2.7 and later only allow whitelisted directories"
                 " for file storage; writing existing configuration values"
                 " into '%s'",
                 options.FILE_STORAGE_PATHS_FILE)

    if file_storage_dir:
      logging.info("File storage directory: %s", file_storage_dir)
    if shared_file_storage_dir:
      logging.info("Shared file storage directory: %s",
                   shared_file_storage_dir)

    buf = StringIO()
    buf.write("# List automatically generated from configuration by\n")
    buf.write("# cfgupgrade at %s\n" % time.asctime())
    if file_storage_dir:
      buf.write("%s\n" % file_storage_dir)
    if shared_file_storage_dir:
      buf.write("%s\n" % shared_file_storage_dir)
    utils.WriteFile(file_name=options.FILE_STORAGE_PATHS_FILE,
                    data=buf.getvalue(),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)


def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
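  """Translates a node's old key into the value of C{new_key_field}.

  Returns C{old_key} unchanged if the node cannot be found.

  """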
  if old_key not in nodes_by_old_key:
    logging.warning("Can't find node '%s' in configuration, assuming that it's"
                    " already up-to-date", old_key)
    return old_key
  return nodes_by_old_key[old_key][new_key_field]


def ChangeNodeIndices(config_data, old_key_field, new_key_field):
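  """Re-keys the node dictionary from C{old_key_field} to C{new_key_field}.

  The master node reference, instance primary nodes and DRBD disk
  logical IDs are updated to use the new keys as well.

  """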
  def ChangeDiskNodeIndices(disk):
    # Note: 'drbd8' is a legacy device type from pre 2.9 and needs to be
    # considered when up/downgrading from/to any versions touching 2.9 on the
    # way.
    drbd_disk_types = set(["drbd8"]) | constants.DTS_DRBD
    if disk["dev_type"] in drbd_disk_types:
      for i in range(0, 2):
        disk["logical_id"][i] = GetNewNodeIndex(nodes_by_old_key,
                                                disk["logical_id"][i],
                                                new_key_field)
    if "children" in disk:
      for child in disk["children"]:
        ChangeDiskNodeIndices(child)

  nodes_by_old_key = {}
  nodes_by_new_key = {}
  for (_, node) in config_data["nodes"].items():
    nodes_by_old_key[node[old_key_field]] = node
    nodes_by_new_key[node[new_key_field]] = node

  config_data["nodes"] = nodes_by_new_key

  cluster = config_data["cluster"]
  cluster["master_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           cluster["master_node"],
                                           new_key_field)

  for inst in config_data["instances"].values():
    inst["primary_node"] = GetNewNodeIndex(nodes_by_old_key,
                                           inst["primary_node"],
                                           new_key_field)
    for disk in inst["disks"]:
      ChangeDiskNodeIndices(disk)


def ChangeInstanceIndices(config_data, old_key_field, new_key_field):
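  """Re-keys the instance dictionary by the C{new_key_field} values.

  """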
  insts_by_old_key = {}
  insts_by_new_key = {}
  for (_, inst) in config_data["instances"].items():
    insts_by_old_key[inst[old_key_field]] = inst
    insts_by_new_key[inst[new_key_field]] = inst

  config_data["instances"] = insts_by_new_key


def UpgradeNodeIndices(config_data):
  ChangeNodeIndices(config_data, "name", "uuid")


def UpgradeInstanceIndices(config_data):
  ChangeInstanceIndices(config_data, "name", "uuid")


def UpgradeAll(config_data):
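  """Applies all upgrade steps and bumps the configuration version.

  """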
  config_data["version"] = version.BuildVersion(TARGET_MAJOR, TARGET_MINOR, 0)
  UpgradeRapiUsers()
  UpgradeWatcher()
  UpgradeFileStoragePaths(config_data)
  UpgradeNetworks(config_data)
  UpgradeCluster(config_data)
  UpgradeGroups(config_data)
  UpgradeInstances(config_data)
  UpgradeNodeIndices(config_data)
  UpgradeInstanceIndices(config_data)


# DOWNGRADE ------------------------------------------------------------


def DowngradeCluster(config_data):
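  """Removes cluster-level settings unknown to the previous version.

  """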
  cluster = config_data.get("cluster", None)
  if not cluster:
    raise Error("Cannot find the 'cluster' key in the configuration")

  if "osparams_private_cluster" in cluster:
    del cluster["osparams_private_cluster"]


def DowngradeInstances(config_data):
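  """Removes per-instance data that the previous version does not support.

  """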
  instances = config_data.get("instances", None)
  if instances is None:
    raise Error("Cannot find the 'instances' key in the configuration")
  for (_, iobj) in instances.items():
    if "osparams_private" in iobj:
      del iobj["osparams_private"]


def DowngradeAll(config_data):
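  """Applies all downgrade steps and lowers the configuration version.

  """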
  # Any code specific to a particular version should be labeled that way, so
  # it can be removed when updating to the next version.
  config_data["version"] = version.BuildVersion(DOWNGRADE_MAJOR,
                                                DOWNGRADE_MINOR, 0)
  DowngradeCluster(config_data)
  DowngradeInstances(config_data)


def main():
  """Main program.

  """
  global options, args # pylint: disable=W0603

  # Option parsing
  parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
  parser.add_option("--dry-run", dest="dry_run",
                    action="store_true",
                    help="Try to do the conversion, but don't write"
                         " output file")
  parser.add_option(cli.FORCE_OPT)
  parser.add_option(cli.DEBUG_OPT)
  parser.add_option(cli.VERBOSE_OPT)
  parser.add_option("--ignore-hostname", dest="ignore_hostname",
                    action="store_true", default=False,
                    help="Don't abort if hostname doesn't match")
  parser.add_option("--path", help="Convert configuration in this"
                    " directory instead of '%s'" % pathutils.DATA_DIR,
                    default=pathutils.DATA_DIR, dest="data_dir")
  parser.add_option("--confdir",
                    help=("Use this directory instead of '%s'" %
                          pathutils.CONF_DIR),
                    default=pathutils.CONF_DIR, dest="conf_dir")
  parser.add_option("--no-verify",
                    help="Do not verify configuration after upgrade",
                    action="store_true", dest="no_verify", default=False)
  parser.add_option("--downgrade",
                    help="Downgrade to the previous stable version",
                    action="store_true", dest="downgrade", default=False)
  (options, args) = parser.parse_args()

  # We need to keep filenames locally because they might be renamed between
  # versions.
  options.data_dir = os.path.abspath(options.data_dir)
  options.CONFIG_DATA_PATH = options.data_dir + "/config.data"
  options.SERVER_PEM_PATH = options.data_dir + "/server.pem"
  options.CLIENT_PEM_PATH = options.data_dir + "/client.pem"
  options.KNOWN_HOSTS_PATH = options.data_dir + "/known_hosts"
  options.RAPI_CERT_FILE = options.data_dir + "/rapi.pem"
  options.SPICE_CERT_FILE = options.data_dir + "/spice.pem"
  options.SPICE_CACERT_FILE = options.data_dir + "/spice-ca.pem"
  options.RAPI_USERS_FILE = options.data_dir + "/rapi/users"
  options.RAPI_USERS_FILE_PRE24 = options.data_dir + "/rapi_users"
  options.CONFD_HMAC_KEY = options.data_dir + "/hmac.key"
  options.CDS_FILE = options.data_dir + "/cluster-domain-secret"
  options.SSCONF_MASTER_NODE = options.data_dir + "/ssconf_master_node"
  options.WATCHER_STATEFILE = options.data_dir + "/watcher.data"
  options.FILE_STORAGE_PATHS_FILE = options.conf_dir + "/file-storage-paths"

  SetupLogging()

  # Option checking
  if args:
    raise Error("No arguments expected")
  if options.downgrade and not options.no_verify:
    options.no_verify = True

  # Check master name
  if not (CheckHostname(options.SSCONF_MASTER_NODE) or options.ignore_hostname):
    logging.error("Aborting due to hostname mismatch")
    sys.exit(constants.EXIT_FAILURE)

  if not options.force:
    if options.downgrade:
      usertext = ("The configuration is going to be DOWNGRADED to version"
                  " %s.%s. Some configuration data might be removed if they"
                  " don't fit in the old format. Please make sure you have"
                  " read the upgrade notes (available in the UPGRADE file"
                  " and included in other documentation formats) to"
                  " understand what they are. Continue with *DOWNGRADING*"
                  " the configuration?" %
                  (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
    else:
      usertext = ("Please make sure you have read the upgrade notes for"
                  " Ganeti %s (available in the UPGRADE file and included"
                  " in other documentation formats). Continue with upgrading"
                  " configuration?" % constants.RELEASE_VERSION)
    if not cli.AskUser(usertext):
      sys.exit(constants.EXIT_FAILURE)

  # Check whether it's a Ganeti configuration directory
  if not (os.path.isfile(options.CONFIG_DATA_PATH) and
          os.path.isfile(options.SERVER_PEM_PATH) and
          os.path.isfile(options.KNOWN_HOSTS_PATH)):
    raise Error(("%s does not seem to be a Ganeti configuration"
                 " directory") % options.data_dir)

  if not os.path.isdir(options.conf_dir):
    raise Error("Not a directory: %s" % options.conf_dir)

  config_data = serializer.LoadJson(utils.ReadFile(options.CONFIG_DATA_PATH))

  try:
    config_version = config_data["version"]
  except KeyError:
    raise Error("Unable to determine configuration version")

  (config_major, config_minor, config_revision) = \
    version.SplitVersion(config_version)

  logging.info("Found configuration version %s (%d.%d.%d)",
               config_version, config_major, config_minor, config_revision)

  if "config_version" in config_data["cluster"]:
    raise Error("Inconsistent configuration: found config_version in"
                " configuration file")

  # Downgrade to the previous stable version
  if options.downgrade:
    if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or
            (config_major == DOWNGRADE_MAJOR and
             config_minor == DOWNGRADE_MINOR)):
      raise Error("Downgrade supported only from the latest version (%s.%s),"
                  " found %s (%s.%s.%s) instead" %
                  (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
                   config_minor, config_revision))
    DowngradeAll(config_data)

  # Upgrade from 2.{0..11} to 2.12
  elif config_major == 2 and config_minor in range(0, 12):
    if config_revision != 0:
      logging.warning("Config revision is %s, not 0", config_revision)
    UpgradeAll(config_data)

  elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
    logging.info("No changes necessary")

  else:
    raise Error("Configuration version %d.%d.%d not supported by this tool" %
                (config_major, config_minor, config_revision))

  try:
    logging.info("Writing configuration file to %s", options.CONFIG_DATA_PATH)
    utils.WriteFile(file_name=options.CONFIG_DATA_PATH,
                    data=serializer.DumpJson(config_data),
                    mode=0600,
                    dry_run=options.dry_run,
                    backup=True)

    if not options.dry_run:
      bootstrap.GenerateClusterCrypto(
        False, False, False, False, False,
        nodecert_file=options.SERVER_PEM_PATH,
        rapicert_file=options.RAPI_CERT_FILE,
        spicecert_file=options.SPICE_CERT_FILE,
        spicecacert_file=options.SPICE_CACERT_FILE,
        hmackey_file=options.CONFD_HMAC_KEY,
        cds_file=options.CDS_FILE)

  except Exception:
    logging.critical("Writing configuration failed. It is probably in an"
                     " inconsistent state and needs manual intervention.")
    raise

  # test loading the config file
  all_ok = True
  if not (options.dry_run or options.no_verify):
    logging.info("Testing the new config file...")
    cfg = config.ConfigWriter(cfg_file=options.CONFIG_DATA_PATH,
                              accept_foreign=options.ignore_hostname,
                              offline=True)
    # if we reached this, it's all fine
    vrfy = cfg.VerifyConfig()
    if vrfy:
      logging.error("Errors after conversion:")
      for item in vrfy:
        logging.error(" - %s", item)
      all_ok = False
    else:
      logging.info("File loaded successfully after upgrading")
    del cfg

  if options.downgrade:
    action = "downgraded"
    out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
  else:
    action = "upgraded"
    out_ver = constants.RELEASE_VERSION
  if all_ok:
    cli.ToStderr("Configuration successfully %s to version %s.",
                 action, out_ver)
  else:
    cli.ToStderr("Configuration %s to version %s, but there are errors."
                 "\nPlease review the file.", action, out_ver)


if __name__ == "__main__":
  main()