#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import sha
import time
import tempfile
import re
import platform
import logging

from ganeti import rpc
from ganeti import ssh
from ganeti import logger
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import serializer


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_MASTER: the LU needs to run on the master node
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_MASTER = True
  REQ_BGL = True

  def __init__(self, processor, op, context):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.context = context
    # Dicts used to declare locking needs to mcpu
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    self.add_locks = {}
    self.remove_locks = {}
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)

    if not self.cfg.IsCluster():
      raise errors.OpPrereqError("Cluster not initialized yet,"
                                 " use 'gnt-cluster init' first.")
    if self.REQ_MASTER:
      master = self.cfg.GetMasterNode()
      if master != utils.HostInfo().name:
        raise errors.OpPrereqError("Commands must be run on the master"
                                   " node %s" % master)

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.cfg.GetClusterName())
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:
      - Use an empty dict if you don't need any lock
      - If you don't need any lock at a particular level omit that level
      - Don't put anything for the BGL level
      - If you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples:
    # Acquire all nodes and one instance
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: ['instance1.example.tld'],
    }
    # Acquire just two nodes
    self.needed_locks = {
      locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
    }
    # Acquire no locks
    self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

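  # A sketch of a typical override (hypothetical LU, shown for
  # illustration only): node locks are computed only once the instance
  # locks are already held, via the _LockInstancesNodes helper below.
  #
  #   def DeclareLocks(self, level):
  #     if level == locking.LEVEL_NODE:
  #       self._LockInstancesNodes()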
  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not be prefixed with 'GANETI_', as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    If a phase has no nodes, an empty list (and not None) should be returned.

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks. By default the method does nothing and the
    previous result is passed back unchanged, but any LU can override it if it
    wants to use the local cluster hook-scripts somehow.

    Args:
      phase: the hooks phase that has just been run
      hook_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: the previous result this LU had, or None in the PRE phase.

    """
    return lu_result

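  # For a concrete override of this callback, see
  # LUVerifyCluster.HooksCallBack further down in this module.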
  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

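  # Illustrative usage (hypothetical instance LU): combined with
  # constants.LOCKS_REPLACE, the instance lock is declared here and the
  # node locks are recalculated later in DeclareLocks.
  #
  #   def ExpandNames(self):
  #     self._ExpandAndLockInstance()
  #     self.needed_locks[locking.LEVEL_NODE] = []
  #     self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE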
  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes, or
    to just lock primaries or secondary nodes, if needed.

    It should be called from DeclareLocks in a way similar to:

    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check that we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None


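# A minimal, hypothetical LU tying the LogicalUnit rules together (not a
# real opcode, shown for illustration only):
#
#   class LUExampleNoop(NoHooksLU):
#     """Does nothing, but exercises the required interface."""
#     _OP_REQP = ["node_name"]
#     REQ_BGL = False
#
#     def ExpandNames(self):
#       self.needed_locks = {}  # this LU needs no locks
#
#     def CheckPrereq(self):
#       pass  # nothing to verify
#
#     def Exec(self, feedback_fn):
#       feedback_fn("noop for %s" % self.op.node_name)
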
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    nodes: Non-empty list of node names (strings) to expand and check

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(node)

  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    instances: List of instances (strings) or None for all

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = lu.cfg.GetInstanceList()
  return utils.NiceSort(wanted)


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  Args:
    static: Static fields
    dynamic: Dynamic fields
    selected: Fields selected by the user (must be a subset of the static
              and dynamic fields)

  """
  static_fields = frozenset(static)
  dynamic_fields = frozenset(dynamic)

  all_fields = static_fields | dynamic_fields

  if not all_fields.issuperset(selected):
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(frozenset(selected).
                                          difference(all_fields)))

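# For example (hypothetical values): with static=["name"] and
# dynamic=["mfree"], selecting ["name", "bogus"] raises OpPrereqError
# naming "bogus" as an unknown output field.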

def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics):
  """Builds instance related env variables for hooks from single variables.

  Args:
    secondary_nodes: List of secondary nodes as strings
  """
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, bridge, mac) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  return env

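# As an illustration (hypothetical values), a call such as
#   _BuildInstanceHookEnv("inst1.example.com", "node1", ["node2"],
#                         "debian-etch", "up", 128, 1,
#                         [("198.51.100.10", "xen-br0", "aa:00:00:11:22:33")])
# yields INSTANCE_NAME=inst1.example.com, INSTANCE_PRIMARY=node1,
# INSTANCE_SECONDARIES="node2", INSTANCE_NIC0_IP=198.51.100.10 and
# INSTANCE_NIC_COUNT=1, among others.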

def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override
  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
434
    'os_type': instance.os,
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)


def _CheckInstanceBridgesExist(instance):
  """Check that the brigdes needed by an instance exist.

  """
  # check bridges existence
  brlist = [nic.bridge for nic in instance.nics]
  if not rpc.call_bridges_exist(instance.primary_node, brlist):
    raise errors.OpPrereqError("one or more target bridges %s do not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))


class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.cfg.GetMasterNode()
    if not rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    return master


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks"]
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existence and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to other nodes

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    if 'node-net-test' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result['node-net-test']:
        bad = True
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result['node-net-test'][node]))

    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False

    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if instanceconfig.status != 'down':
      if (node_current not in node_instance or
          instance not in node_instance[node_current]):
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if node != node_current:
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary
      # has enough memory to host all instances for which it is secondary,
      # should a single other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          needed_mem += instance_cfg[instance].memory
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks are run only in the post phase; their failure makes
    the output be logged in the verify output and the verification fail.

    """
    all_nodes = self.cfg.GetNodeList()
    # TODO: populate the environment with useful information for verify hooks
    env = {}
    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    i_non_redundant = [] # Non redundant instances
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = []
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
                        for node in nodeinfo]
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)
    all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if isinstance(volumeinfo, basestring):
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if not isinstance(nodeinstance, list):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

      # node_info
      nodeinfo = all_ninfo[node]
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nodeinfo['vg_free']),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary. This is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = self.cfg.GetInstanceInfo(instance)
      result = self._VerifyInstance(instance, inst_config, node_volume,
                                    node_instance, feedback_fn)
      bad = bad or result

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      else:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        else:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    return not bad

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analize the post-hooks' result, handle it, and send some
    nicely-formatted feedback back to the user.

    Args:
      phase: the hooks phase that has just been run
      hooks_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: previous Exec result

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          if res is False or not isinstance(res, list):
            feedback_fn("    Communication failure")
            lu_result = 1
            continue
          for script, hkr, output in res:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      return lu_result


class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
    self.share_locks = dict(((i, 1) for i in locking.LEVELS))

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    """
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (inst.status != "up" or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = rpc.call_volume_list(nodes, vg_name)

    to_act = set()
    for node in nodes:
      # node_volume
      lvs = node_lvs[node]

      if isinstance(lvs, basestring):
        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
        res_nlvm[node] = lvs
      elif not isinstance(lvs, dict):
        logger.Info("connection to node %s failed or invalid data returned" %
                    (node,))
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result


class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip

    # shutdown the master IP
    master = self.cfg.GetMasterNode()
    if not rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # modify the sstore
Michael Hanselmann's avatar
Michael Hanselmann committed
1053
      # TODO: sstore
      ss.SetKey(ss.SS_MASTER_IP, ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)

      # Distribute updated ss config to all nodes
      myself = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      if myself.name in dist_nodes:
        dist_nodes.remove(myself.name)

      logger.Debug("Copying updated ssconf data to all nodes")
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
        fname = ss.KeyToFilename(keyname)
        result = rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          if not result[to_node]:
            logger.Error("copy of file %s to node %s failed" %
                         (fname, to_node))
    finally:
      if not rpc.call_node_start_master(master, False):
        logger.Error("Could not re-enable the master role on the master,"
                     " please restart manually.")


def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or its children are lvm-based.

  Args:
    disk: ganeti.objects.Disk object

  Returns:
    boolean indicating whether a LD_LV dev_type was found or not

  """
  if disk.children:
    for chdisk in disk.children:
      if _RecursiveCheckIfLVMBased(chdisk):
        return True
  return disk.dev_type == constants.LD_LV

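# For instance, a mirrored disk whose children are logical volumes
# returns True here (a leaf has dev_type == constants.LD_LV), while a
# disk tree containing no LV at any depth returns False.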

class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []
  REQ_BGL = False

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks[locking.LEVEL_NODE] = 1

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    mn = self.cfg.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    # FIXME: This only works because there is only one parameter that can be
    # changed or removed.
    if not self.op.vg_name:
      instances = self.cfg.GetAllInstancesInfo().values()
      for inst in instances:
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      node_list = self.acquired_locks[locking.LEVEL_NODE]
      vglist = rpc.call_vg_list(node_list)
      for node in node_list:
        vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name != self.cfg.GetVGName():
      self.cfg.SetVGName(self.op.vg_name)
    else:
      feedback_fn("Cluster LVM configuration already in desired"
                  " state, not changing")


def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  """
  if not instance.disks:
    return True

  if not oneshot:
    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    cfgw.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if not rstats:
      proc.LogWarning("Can't get any data from node %s" % node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    retries = 0
    for i in range(len(rstats)):
      mstat = rstats[i]
      if mstat is None:
        proc.LogWarning("Can't compute data for node %s/%s" %
                        (node, instance.disks[i].iv_name))
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        proc.LogInfo("- device %s: %5.2f%% done, %s" %
                     (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    time.sleep(min(60, max_time))

  if done:
    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded


def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  cfgw.SetDiskID(dev, node)
  if ldisk:
    idx = 6
  else:
    idx = 5

  result = True
  if on_primary or dev.AssembleOnSecondary():
    rstats = rpc.call_blockdev_find(node, dev)
    if not rstats:
      logger.ToStderr("Node %s: Disk degraded, not found or node down" % node)
      result = False
    else:
      result = result and (not rstats[idx])
  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)

  return result


class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    self.dynamic_fields = frozenset(["name", "valid", "node_status"])
    _CheckOutputFields(static=[],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into an a per-os per-node dictionary

      Args:
        node_list: a list with the names of all nodes
        rlist: a map with node names as keys and OS objects as values

      Returns:
        map: a map with osnames as keys and as value another map, with
             nodes as
             keys and list of OS objects as values
             e.g. {"debian-etch": {"node1": [<object>,...],
                                   "node2": [<object>,]}
                  }

    """
    all_os = {}
    for node_name, nr in rlist.iteritems():
      if not nr:
        continue
      for os_obj in nr:
        if os_obj.name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[os_obj.name] = {}
          for nname in node_list:
            all_os[os_obj.name][nname] = []
        all_os[os_obj.name][node_name].append(os_obj)
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.acquired_locks[locking.LEVEL_NODE]
    node_data = rpc.call_os_diagnose(node_list)
    if node_data is False:
      raise errors.OpExecError("Can't gather the list of OSes")
    pol = self._DiagnoseByOS(node_list, node_data)
    output = []
    for os_name, os_data in pol.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          val = {}
          for node_name, nos_list in os_data.iteritems():
            val[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output


class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.
    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.cfg.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logger.Info("stopping the node daemon and removing configs from node %s" %
                node.name)

    self.context.RemoveNode(node.name)

    rpc.call_node_leave_cluster(node.name)


class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    self.dynamic_fields = frozenset([
      "dtotal", "dfree",
      "mtotal", "mnode", "mfree",
      "bootid",
      "ctotal",
      ])

    self.static_fields = frozenset([
      "name", "pinst_cnt", "sinst_cnt",
      "pinst_list", "sinst_list",
      "pip", "sip", "tags",
      "serial_no",
      ])

    _CheckOutputFields(static=self.static_fields,
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1

    if self.op.names:
      self.wanted = _GetWantedNodes(self, self.op.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = not self.static_fields.issuperset(self.op.output_fields)
    if self.do_locking:
      # if we don't request only static fields, we need to lock the nodes
      self.needed_locks[locking.LEVEL_NODE] = self.wanted


  def CheckPrereq(self):
    """Check prerequisites.

    """
    # The validation of the node list is done in _GetWantedNodes, if the
    # list is non-empty; if it is empty, there's no validation to do
    pass

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    all_info = self.cfg.GetAllNodesInfo()
    if self.do_locking:
      nodenames = self.acquired_locks[locking.LEVEL_NODE]
    elif self.