#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import sha
import time
import tempfile
import re
import platform
import logging

from ganeti import rpc
from ganeti import ssh
from ganeti import logger
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import serializer


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_MASTER: the LU needs to run on the master node
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.
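
  A minimal subclass sketch (hypothetical LU, for illustration only; a
  real LU would do actual work in Exec and usually define hooks):

    class LUNoOp(LogicalUnit):
      HPATH = None      # no hooks, so BuildHooksEnv is never called
      HTYPE = None
      _OP_REQP = []

      def CheckPrereq(self):
        pass

      def Exec(self, feedback_fn):
        return True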

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_MASTER = True
  REQ_BGL = True

  def __init__(self, processor, op, context):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)

    if not self.cfg.IsCluster():
      raise errors.OpPrereqError("Cluster not initialized yet,"
                                 " use 'gnt-cluster init' first.")
    if self.REQ_MASTER:
      master = self.cfg.GetMasterNode()
      if master != utils.HostInfo().name:
        raise errors.OpPrereqError("Commands must be run on the master"
                                   " node %s" % master)

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:
      - Use an empty dict if you don't need any lock
      - If you don't need any lock at a particular level omit that level
      - Don't put anything for the BGL level
      - If you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.
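
    For example, to acquire the node locks in shared mode (sketch):
      self.share_locks[locking.LEVEL_NODE] = 1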

    Examples:
    # Acquire all nodes and one instance
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: ['instance1.example.tld'],
    }
    # Acquire just two nodes
    self.needed_locks = {
      locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
    }
    # Acquire no locks
    self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS
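
    A typical implementation for a concurrent LU could be (sketch, assuming
    the instance locks were already declared in ExpandNames):

      def DeclareLocks(self, level):
        if level == locking.LEVEL_NODE:
          self._LockInstancesNodes()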

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    If there are no nodes to return, use an empty list (and not None).
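
    A sketch of a typical return value (hypothetical env contents):

      env = {"INSTANCE_NAME": self.op.instance_name}
      nl = [self.cfg.GetMasterNode()]
      return env, nl, nl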

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    Args:
      phase: the hooks phase that has just been run
      hook_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: the previous result this LU had, or None in the PRE phase.
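
    A sketch of an override that fails the LU on hook communication errors
    (hypothetical; see LUVerifyCluster.HooksCallBack for a real example):

      def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
        if phase == constants.HOOKS_PHASE_POST and not hook_results:
          return False
        return lu_result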

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.
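
    Typical use from an LU's ExpandNames (sketch):

      def ExpandNames(self):
        self._ExpandAndLockInstance()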

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to:

    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    nodes: non-empty list of node names (strings) to be expanded

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
                                 " non-empty list of nodes whose name is to be"
                                 " expanded.")

  wanted = []
  for name in nodes:
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(node)

  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    instances: List of instances (strings) or None for all

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = lu.cfg.GetInstanceList()
  return utils.NiceSort(wanted)


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  Args:
    static: Static fields
    dynamic: Dynamic fields
    selected: Fields selected by the caller
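
  Example (sketch, hypothetical field names):
    _CheckOutputFields(static=["name"], dynamic=["free_memory"],
                       selected=["name", "free_memory"])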

  """
  static_fields = frozenset(static)
  dynamic_fields = frozenset(dynamic)

  all_fields = static_fields | dynamic_fields

  if not all_fields.issuperset(selected):
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(frozenset(selected).
                                          difference(all_fields)))


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics):
  """Builds instance related env variables for hooks from single variables.

  Args:
    secondary_nodes: List of secondary nodes as strings
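
  For a single-NIC instance, for example, the returned dict contains the
  keys INSTANCE_NIC0_IP, INSTANCE_NIC0_BRIDGE and INSTANCE_NIC0_HWADDR,
  plus INSTANCE_NIC_COUNT set to 1.
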
  """
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, bridge, mac) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  return env


def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override

  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)


def _CheckInstanceBridgesExist(lu, instance):
  """Check that the bridges needed by an instance exist.

  """
  # check bridges existence
  brlist = [nic.bridge for nic in instance.nics]
  if not rpc.call_bridges_exist(instance.primary_node, brlist):
    raise errors.OpPrereqError("one or more target bridges %s do not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))


class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    if not rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    return master


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existence and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    if 'node-net-test' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result['node-net-test']:
        bad = True
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result['node-net-test'][node]))

    hyp_result = node_result.get('hypervisor', None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))
    return bad

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False

    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if instanceconfig.status != 'down':
      if (node_current not in node_instance or
          instance not in node_instance[node_current]):
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if node != node_current:
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
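      # Example (hypothetical numbers): if the instances that have prinode as
      # primary and this node as secondary need 2048MB and 1024MB of memory,
      # this node must keep mfree >= 3072MB to absorb prinode's failure.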
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          needed_mem += instance_cfg[instance].memory
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accommodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just run in the post phase and their failure makes
    the output be logged in the verify output and the verification fail.

    """
    all_nodes = self.cfg.GetNodeList()
    # TODO: populate the environment with useful information for verify hooks
    env = {}
    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    i_non_redundant = [] # Non redundant instances
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = []
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist, hypervisors)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': hypervisors,
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
                        for node in nodeinfo]
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param,
                                      self.cfg.GetClusterName())
    all_rversion = rpc.call_version(nodelist)
    all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName(),
                                   self.cfg.GetHypervisorType())

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if isinstance(volumeinfo, basestring):
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

      # node_info
      nodeinfo = all_ninfo[node]
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nodeinfo['vg_free']),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary.  this is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = self.cfg.GetInstanceInfo(instance)
      result = self._VerifyInstance(instance, inst_config, node_volume,
                                    node_instance, feedback_fn)
      bad = bad or result

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      else:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        else:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    return not bad

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analyze the post-hooks' result, handle it, and send some
    nicely-formatted feedback back to the user.

    Args:
      phase: the hooks phase that has just been run
      hooks_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: previous Exec result

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          if res is False or not isinstance(res, list):
            feedback_fn("    Communication failure")
            lu_result = 1
            continue
          for script, hkr, output in res:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      return lu_result


class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    """
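    # Note: 'result' is the tuple (res_nodes, res_nlvm, res_instances,
    # res_missing); the four names alias its elements so they can be filled
    # in place and returned together.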
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (inst.status != "up" or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = rpc.call_volume_list(nodes, vg_name)

    to_act = set()
    for node in nodes:
      # node_volume
      lvs = node_lvs[node]

      if isinstance(lvs, basestring):
        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
        res_nlvm[node] = lvs
      elif not isinstance(lvs, dict):
        logger.Info("connection to node %s failed or invalid data returned" %
                    (node,))
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result


class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    if not rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # modify the sstore
      # TODO: sstore; the ss.* calls below still use the old ssconf-style
      # store and need to be converted to the new configuration scheme
      ss.SetKey(ss.SS_MASTER_IP, ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)

      # Distribute updated ss config to all nodes
      myself = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      if myself.name in dist_nodes:
        dist_nodes.remove(myself.name)

      logger.Debug("Copying updated ssconf data to all nodes")
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
        fname = ss.KeyToFilename(keyname)
        result = rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          if not result[to_node]:
            logger.Error("copy of file %s to node %s failed" %
                         (fname, to_node))
    finally:
      if not rpc.call_node_start_master(master, False):
        logger.Error("Could not re-enable the master role on the master,"
                     " please restart manually.")


def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or its children are lvm-based.

  Args:
    disk: ganeti.objects.Disk object

  Returns:
    boolean indicating whether a LD_LV dev_type was found or not

  """
  if disk.children:
    for chdisk in disk.children:
      if _RecursiveCheckIfLVMBased(chdisk):
        return True
  return disk.dev_type == constants.LD_LV


class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    whether the given volume group is valid.

    """
    # FIXME: This only works because there is only one parameter that can be
    # changed or removed.
    if not self.op.vg_name:
      instances = self.cfg.GetAllInstancesInfo().values()
      for inst in instances:
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      node_list = self.acquired_locks[locking.LEVEL_NODE]
      vglist = rpc.call_vg_list(node_list)
      for node in node_list:
        vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name != self.cfg.GetVGName():
      self.cfg.SetVGName(self.op.vg_name)
    else:
      feedback_fn("Cluster LVM configuration already in desired"
                  " state, not changing")


def _WaitForSync(lu, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  """
  if not instance.disks:
    return True

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if not rstats:
      lu.proc.LogWarning("Can't get any data from node %s" % node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    retries = 0
    for i in range(len(rstats)):
      mstat = rstats[i]
      if mstat is None:
        lu.proc.LogWarning("Can't compute data for node %s/%s" %
                           (node, instance.disks[i].iv_name))
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded


def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  lu.cfg.SetDiskID(dev, node)
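  # Index into the status tuple returned by call_blockdev_find: position 5
  # is assumed to hold the is_degraded flag and position 6 the ldisk status
  # (a sketch of the layout; see the rpc layer for the authoritative format).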
  if ldisk:
    idx = 6
  else:
    idx = 5
Iustin Pop committed