#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import sha
import time
import tempfile
import re
import platform
import logging
import copy

from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import serializer


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_MASTER: the LU needs to run on the master node
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

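  As an illustration only (this sketch is not an actual LU from this module,
  and the opcode parameter below is hypothetical), a minimal concurrent LU
  could look like:

    class LUExampleQuery(NoHooksLU):
      _OP_REQP = ["nodes"]
      REQ_BGL = False

      def ExpandNames(self):
        self.op.nodes = _GetWantedNodes(self, self.op.nodes)
        self.needed_locks = {locking.LEVEL_NODE: self.op.nodes}

      def CheckPrereq(self):
        pass

      def Exec(self, feedback_fn):
        return self.op.nodes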
  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_MASTER = True
  REQ_BGL = True

  def __init__(self, processor, op, context, rpc):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    self.rpc = rpc
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)

    if not self.cfg.IsCluster():
      raise errors.OpPrereqError("Cluster not initialized yet,"
                                 " use 'gnt-cluster init' first.")
    if self.REQ_MASTER:
      master = self.cfg.GetMasterNode()
      if master != utils.HostInfo().name:
        raise errors.OpPrereqError("Commands must be run on the master"
                                   " node %s" % master)

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:
      - Use an empty dict if you don't need any lock
      - If you don't need any lock at a particular level omit that level
      - Don't put anything for the BGL level
      - If you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples:
    # Acquire all nodes and one instance
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: ['instance1.example.tld'],
    }
    # Acquire just two nodes
    self.needed_locks = {
      locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
    }
    # Acquire no locks
    self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS
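
    As a sketch only (the opcode attribute used here is hypothetical), an LU
    that can compute its node locks only after lower levels are held could do:

      def DeclareLocks(self, level):
        if level == locking.LEVEL_NODE:
          self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node]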

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    If no nodes are to be targeted, an empty list (and not None) should be
    returned.

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged but any LU can define it if it
    wants to use the local cluster hook-scripts somehow.

    Args:
      phase: the hooks phase that has just been run
      hook_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: the previous result this LU had, or None in the PRE phase.

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

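  # A typical caller follows this pattern in its ExpandNames (a sketch of the
  # idiom used by LUs elsewhere in this module that lock one instance and
  # then its nodes):
  #
  #   def ExpandNames(self):
  #     self._ExpandAndLockInstance()
  #     self.needed_locks[locking.LEVEL_NODE] = []
  #     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
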
  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called in DeclareLocks in a way similar to:

    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    nodes: List of nodes (strings) or None for all

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(node)

  return utils.NiceSort(wanted)

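# Illustrative call (hypothetical names): _GetWantedNodes(lu, ["node1"])
# returns the expanded, sorted list ["node1.example.com"], provided that
# name is known to the cluster configuration.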

def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    instances: List of instances (strings) or None for all

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = lu.cfg.GetInstanceList()
  return utils.NiceSort(wanted)


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  Args:
    static: Static fields
    dynamic: Dynamic fields

  """
  static_fields = frozenset(static)
  dynamic_fields = frozenset(dynamic)

  all_fields = static_fields | dynamic_fields

  if not all_fields.issuperset(selected):
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(frozenset(selected).
                                          difference(all_fields)))

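# Sketch of the intended use (the field names here are hypothetical):
#
#   _CheckOutputFields(static=["name"], dynamic=["free_memory"],
#                      selected=self.op.output_fields)
#
# This raises OpPrereqError naming any selected field outside static|dynamic.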

def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics):
  """Builds instance related env variables for hooks from single variables.

  Args:
    secondary_nodes: List of secondary nodes as strings
  """
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, bridge, mac) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  return env

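# For illustration (all values hypothetical), a single-NIC instance yields an
# environment along these lines:
#
#   _BuildInstanceHookEnv("inst1.example.com", "node1", ["node2"], "debian",
#                         "up", 128, 1,
#                         [("198.51.100.10", "xen-br0", "aa:00:00:00:00:01")])
#   => {"OP_TARGET": "inst1.example.com", "INSTANCE_PRIMARY": "node1",
#       "INSTANCE_NIC0_IP": "198.51.100.10", "INSTANCE_NIC_COUNT": 1, ...}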

def _BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override
  """
  bep = lu.cfg.GetClusterInfo().FillBE(instance)
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.status,
    'memory': bep[constants.BE_MEMORY],
    'vcpus': bep[constants.BE_VCPUS],
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)


def _CheckInstanceBridgesExist(lu, instance):
  """Check that the bridges needed by an instance exist.

  """
  # check bridges existence
  brlist = [nic.bridge for nic in instance.nics]
  if not lu.rpc.call_bridges_exist(instance.primary_node, brlist):
    raise errors.OpPrereqError("one or more target bridges %s do not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))


class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    if not self.rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    return master


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existence and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    if not node_result:
      feedback_fn("  - ERROR: unable to verify node %s." % (node,))
      return True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    if 'node-net-test' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result['node-net-test']:
        bad = True
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result['node-net-test'][node]))

    hyp_result = node_result.get('hypervisor', None)
    if isinstance(hyp_result, dict):
      for hv_name, hv_result in hyp_result.iteritems():
        if hv_result is not None:
          feedback_fn("  - ERROR: hypervisor %s verify failure: '%s'" %
                      (hv_name, hv_result))
    return bad

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False

    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if instanceconfig.status != 'down':
      if (node_current not in node_instance or
          instance not in node_instance[node_current]):
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if node != node_current:
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all instances it is supposed to, should a single
      # other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          bep = self.cfg.GetClusterInfo().FillBE(instance_cfg[instance])
          if bep[constants.BE_AUTO_BALANCE]:
            needed_mem += bep[constants.BE_MEMORY]
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad

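  # Worked example for the N+1 check above (numbers are hypothetical): if
  # node2 is secondary for instances A (memory 512) and B (memory 256), both
  # with node1 as primary and auto_balance set, then on node2
  # nodeinfo['sinst-by-pnode']['node1'] is [A, B] and needed_mem is 768;
  # node2 fails the check whenever its mfree is below 768.
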
  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just run in the post phase and their failure makes
    the output be logged in the verify output and the verification to fail.

    """
    all_nodes = self.cfg.GetNodeList()
    # TODO: populate the environment with useful information for verify hooks
    env = {}
    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    hypervisors = self.cfg.GetClusterInfo().enabled_hypervisors
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    i_non_redundant = [] # Non redundant instances
    i_non_a_balanced = [] # Non auto-balanced instances
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = []
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = self.rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = self.rpc.call_instance_list(nodelist, hypervisors)
    all_vglist = self.rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': hypervisors,
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
                        for node in nodeinfo]
      }
    all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                           self.cfg.GetClusterName())
    all_rversion = self.rpc.call_version(nodelist)
    all_ninfo = self.rpc.call_node_info(nodelist, self.cfg.GetVGName(),
                                        self.cfg.GetHypervisorType())

    cluster = self.cfg.GetClusterInfo()
    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if isinstance(volumeinfo, basestring):
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

      # node_info
      nodeinfo = all_ninfo[node]
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nodeinfo['vg_free']),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary. This is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = self.cfg.GetInstanceInfo(instance)
      result =  self._VerifyInstance(instance, inst_config, node_volume,
                                     node_instance, feedback_fn)
      bad = bad or result

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      else:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      if not cluster.FillBE(inst_config)[constants.BE_AUTO_BALANCE]:
        i_non_a_balanced.append(instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        else:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    if i_non_a_balanced:
      feedback_fn("  - NOTICE: %d non-auto-balanced instance(s) found."
                  % len(i_non_a_balanced))

    return not bad

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analize the post-hooks' result, handle it, and send some
    nicely-formatted feedback back to the user.

    Args:
      phase: the hooks phase that has just been run
      hooks_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: previous Exec result

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          if res is False or not isinstance(res, list):
            feedback_fn("    Communication failure")
            lu_result = 1
            continue
          for script, hkr, output in res:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      return lu_result


class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    """
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (inst.status != "up" or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = self.rpc.call_volume_list(nodes, vg_name)

    to_act = set()
    for node in nodes:
      # node_volume
      lvs = node_lvs[node]

      if isinstance(lvs, basestring):
        logging.warning("Error enumerating LVs on node %s: %s", node, lvs)
        res_nlvm[node] = lvs
      elif not isinstance(lvs, dict):
        logging.warning("Connection to node %s failed or invalid data"
                        " returned", node)
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result


class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    if not self.rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # modify the sstore
      # TODO: sstore
      ss.SetKey(ss.SS_MASTER_IP, ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)

      # Distribute updated ss config to all nodes
      myself = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      if myself.name in dist_nodes:
        dist_nodes.remove(myself.name)

      logging.debug("Copying updated ssconf data to all nodes")
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
        fname = ss.KeyToFilename(keyname)
        result = self.rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          if not result[to_node]:
            logging.error("Copy of file %s to node %s failed", fname, to_node)
    finally:
      if not self.rpc.call_node_start_master(master, False):
        logging.error("Could not re-enable the master role on the master,"
                      " please restart manually.")


def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or its children are lvm-based.

  Args:
    disk: ganeti.objects.Disk object

  Returns:
    boolean indicating whether a LD_LV dev_type was found or not

  """
  if disk.children:
    for chdisk in disk.children:
      if _RecursiveCheckIfLVMBased(chdisk):
        return True
  return disk.dev_type == constants.LD_LV

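# Example of the recursion above (a sketch; objects.Disk construction is
# abbreviated and other attributes are omitted):
#
#   lv_a = objects.Disk(dev_type=constants.LD_LV)
#   lv_b = objects.Disk(dev_type=constants.LD_LV)
#   drbd = objects.Disk(dev_type=constants.LD_DRBD8, children=[lv_a, lv_b])
#   _RecursiveCheckIfLVMBased(drbd)  # True, via the LD_LV children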

class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    whether the given volume group is valid.

    """
    # FIXME: This only works because there is only one parameter that can be
    # changed or removed.
    if self.op.vg_name is not None and not self.op.vg_name:
      instances = self.cfg.GetAllInstancesInfo().values()
      for inst in instances:
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")

    node_list = self.acquired_locks[locking.LEVEL_NODE]

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      vglist = self.rpc.call_vg_list(node_list)
      for node in node_list:
        vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

    self.cluster = cluster = self.cfg.GetClusterInfo()
    # beparams changes do not need validation (we can't validate?),
    # but we still process here
    if self.op.beparams:
      self.new_beparams = cluster.FillDict(
        cluster.beparams[constants.BEGR_DEFAULT], self.op.beparams)

    # hypervisor list/parameters
    self.new_hvparams = cluster.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      if not isinstance(self.op.hvparams, dict):
        raise errors.OpPrereqError("Invalid 'hvparams' parameter on input")
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    if self.op.enabled_hypervisors is not None:
      self.hv_list = self.op.enabled_hypervisors
    else:
      self.hv_list = cluster.enabled_hypervisors

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisor(hv_name)
          hv_class.CheckParameterSyntax(hv_params)
          _CheckHVParams(self, node_list, hv_name, hv_params)

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name is not None:
      if self.op.vg_name != self.cfg.GetVGName():
        self.cfg.SetVGName(self.op.vg_name)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")
    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.enabled_hypervisors is not None:
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
    if self.op.beparams:
      self.cluster.beparams[constants.BEGR_DEFAULT] = self.new_beparams
    self.cfg.Update(self.cluster)


def _WaitForSync(lu, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  """
  if not instance.disks:
    return True

  if not oneshot:
    lu.proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    lu.cfg.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if not rstats:
      lu.proc.LogWarning("Can't get any data from node %s" % node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    retries = 0
    for i in range(len(rstats)):
      mstat = rstats[i]
      if mstat is None:
        lu.proc.LogWarning("Can't compute data for node %s/%s" %
                           (node, instance.disks[i].iv_name))
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        lu.proc.LogInfo("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    time.sleep(min(60, max_time))

  if done:
    lu.proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded

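# Typical use (sketch, mirroring how the instance LUs in this module consume
# the return value):
#
#   disk_abort = not _WaitForSync(self, instance)
#   if disk_abort:
#     raise errors.OpExecError("There are some degraded disks for"
#                              " this instance")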

def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  lu.cfg.SetDiskID(dev, node)
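  # index into the tuple returned by call_blockdev_find (as described in the
  # docstring above): position 5 holds the overall is_degraded flag, position
  # 6 the local-disk (ldisk) status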
  if ldisk:
    idx = 6
  else:
    idx = 5

  result = True
  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    if not rstats:
      logging.warning("Node %s: disk degraded, not found or node down", node)
      result = False
    else:
      result = result and (not rstats[idx])
  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result


class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    self.dynamic_fields = frozenset(["name", "valid", "node_status"])
    _CheckOutputFields(static=[],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into an a per-os per-node dictionary

      Args:
        node_list: a list with the names of all nodes
        rlist: a map with node names as keys and OS objects as values

      Returns:
        map: a map with osnames as keys and as value another map, with
             nodes as keys and list of OS objects as values
             e.g. {"debian-etch": {"node1": [<object>,...],
                                   "node2": [<object>,]}
                  }

    """
    all_os = {}
    for node_name, nr in rlist.iteritems():
      if not nr:
        continue
      for os_obj in nr:
        if os_obj.name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[os_obj.name] = {}
          for nname in node_list:
            all_os[os_obj.name][nname] = []
        all_os[os_obj.name][node_name].append(os_obj)
    return all_os

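  # For example (hypothetical data): with node_list = ["node1", "node2"] and
  # rlist = {"node1": [os_deb], "node2": []}, where os_deb.name is "debian",
  # _DiagnoseByOS returns {"debian": {"node1": [os_deb], "node2": []}}.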
  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.acquired_locks[locking.LEVEL_NODE]
    node_data = self.rpc.call_os_diagnose(node_list)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    pol = self._DiagnoseByOS(node_list, node_data)
    output = []
    for os_name, os_data in pol.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          val = {}
          for node_name, nos_list<