#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import sha
import time
import tempfile
import re
import platform

from ganeti import rpc
from ganeti import ssh
from ganeti import logger
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import serializer


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement ExpandNames
    - implement CheckPrereq
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements:
        REQ_MASTER: the LU needs to run on the master node
        REQ_WSSTORE: the LU needs a writable SimpleStore
        REQ_BGL: the LU needs to hold the Big Ganeti Lock exclusively

  Note that all commands require root permissions.

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_MASTER = True
  REQ_WSSTORE = False
  REQ_BGL = True

  def __init__(self, processor, op, context, sstore):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = context.cfg
    self.sstore = sstore
    self.context = context
    self.needed_locks = None
    self.acquired_locks = {}
    self.share_locks = dict(((i, 0) for i in locking.LEVELS))
    # Used to force good behavior when calling helper functions
    self.recalculate_locks = {}
    self.__ssh = None

    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)

    if not self.cfg.IsCluster():
      raise errors.OpPrereqError("Cluster not initialized yet,"
                                 " use 'gnt-cluster init' first.")
    if self.REQ_MASTER:
      master = sstore.GetMasterNode()
      if master != utils.HostInfo().name:
        raise errors.OpPrereqError("Commands must be run on the master"
                                   " node %s" % master)

  def __GetSSH(self):
    """Returns the SshRunner object

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner(self.sstore)
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def ExpandNames(self):
    """Expand names for this LU.

    This method is called before starting to execute the opcode, and it should
    update all the parameters of the opcode to their canonical form (e.g. a
    short node name must be fully expanded after this method has successfully
    completed). This way locking, hooks, logging, etc. can work correctly.

    LUs which implement this method must also populate the self.needed_locks
    member, as a dict with lock levels as keys, and a list of needed lock names
    as values. Rules:
      - Use an empty dict if you don't need any lock
      - If you don't need any lock at a particular level omit that level
      - Don't put anything for the BGL level
      - If you want all locks at a level use locking.ALL_SET as a value

    If you need to share locks (rather than acquire them exclusively) at one
    level you can modify self.share_locks, setting a true value (usually 1) for
    that level. By default locks are not shared.

    Examples:
    # Acquire all nodes and one instance
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: ['instance1.example.tld'],
    }
    # Acquire just two nodes
    self.needed_locks = {
      locking.LEVEL_NODE: ['node1.example.tld', 'node2.example.tld'],
    }
    # Acquire no locks
    self.needed_locks = {} # No, you can't leave it to the default value None

    """
    # The implementation of this method is mandatory only if the new LU is
    # concurrent, so that old LUs don't need to be changed all at the same
    # time.
    if self.REQ_BGL:
      self.needed_locks = {} # Exclusive LUs don't need locks.
    else:
      raise NotImplementedError

  def DeclareLocks(self, level):
    """Declare LU locking needs for a level

    While most LUs can just declare their locking needs at ExpandNames time,
    sometimes there's the need to calculate some locks after having acquired
    the ones before. This function is called just before acquiring locks at a
    particular level, but after acquiring the ones at lower levels, and permits
    such calculations. It can be used to modify self.needed_locks, and by
    default it does nothing.

    This function is only called if you have something already set in
    self.needed_locks for the level.

    @param level: Locking level which is going to be locked
    @type level: member of ganeti.locking.LEVELS

    """

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form if it hasn't been done by ExpandNames before.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    Likewise, if there are no nodes to return, use an empty list (and not
    None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError

  def HooksCallBack(self, phase, hook_results, feedback_fn, lu_result):
    """Notify the LU about the results of its hooks.

    This method is called every time a hooks phase is executed, and notifies
    the Logical Unit about the hooks' result. The LU can then use it to alter
    its result based on the hooks.  By default the method does nothing and the
    previous result is passed back unchanged, but any LU can override it if it
    wants to use the local cluster hook-scripts somehow.

    Args:
      phase: the hooks phase that has just been run
      hook_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: the previous result this LU had, or None in the PRE phase.

    """
    return lu_result

  def _ExpandAndLockInstance(self):
    """Helper function to expand and lock an instance.

    Many LUs that work on an instance take its name in self.op.instance_name
    and need to expand it and then declare the expanded name for locking. This
    function does it, and then updates self.op.instance_name to the expanded
    name. It also initializes needed_locks as a dict, if this hasn't been done
    before.

    """
    if self.needed_locks is None:
      self.needed_locks = {}
    else:
      assert locking.LEVEL_INSTANCE not in self.needed_locks, \
        "_ExpandAndLockInstance called with instance-level locks set"
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    if expanded_name is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                  self.op.instance_name)
    self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name
    self.op.instance_name = expanded_name

  def _LockInstancesNodes(self, primary_only=False):
    """Helper function to declare instances' nodes for locking.

    This function should be called after locking one or more instances to lock
    their nodes. Its effect is populating self.needed_locks[locking.LEVEL_NODE]
    with all primary or secondary nodes for instances already locked and
    present in self.needed_locks[locking.LEVEL_INSTANCE].

    It should be called from DeclareLocks, and for safety only works if
    self.recalculate_locks[locking.LEVEL_NODE] is set.

    In the future it may grow parameters to just lock some instance's nodes,
    if needed; locking only primary nodes is already supported via the
    primary_only parameter.

    It should be called in DeclareLocks in a way similar to:

    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

    @type primary_only: boolean
    @param primary_only: only lock primary nodes of locked instances

    """
    assert locking.LEVEL_NODE in self.recalculate_locks, \
      "_LockInstancesNodes helper function called with no nodes to recalculate"

    # TODO: check if we've really been called with the instance locks held

    # For now we'll replace self.needed_locks[locking.LEVEL_NODE], but in the
    # future we might want to have different behaviors depending on the value
    # of self.recalculate_locks[locking.LEVEL_NODE]
    wanted_nodes = []
    for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
      instance = self.context.cfg.GetInstanceInfo(instance_name)
      wanted_nodes.append(instance.primary_node)
      if not primary_only:
        wanted_nodes.extend(instance.secondary_nodes)

    if self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_REPLACE:
      self.needed_locks[locking.LEVEL_NODE] = wanted_nodes
    elif self.recalculate_locks[locking.LEVEL_NODE] == constants.LOCKS_APPEND:
      self.needed_locks[locking.LEVEL_NODE].extend(wanted_nodes)

    del self.recalculate_locks[locking.LEVEL_NODE]


class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None


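# The following is an illustrative sketch only (not part of the original
# module) of how a concurrent LU combines _ExpandAndLockInstance,
# recalculate_locks and _LockInstancesNodes.  The class name is hypothetical
# and the LU is never registered with the processor.
class _LUExampleInstanceQuery(NoHooksLU):
  """Example-only LU locking one instance and, later, its nodes.

  """
  _OP_REQP = ["instance_name"]
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # node locks cannot be computed yet; declare an empty list and ask for
    # a recalculation once the instance lock is held
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      # replaces the empty list with the instance's primary/secondary nodes
      self._LockInstancesNodes()

  def CheckPrereq(self):
    pass

  def Exec(self, feedback_fn):
    return self.op.instance_name
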
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    nodes: a non-empty list of node names (strings) to check and expand

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(node)

  return utils.NiceSort(wanted)

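# Example (illustrative, hypothetical names): _GetWantedNodes(self,
# ["node1"]) returns the expanded, sorted name list, e.g.
# ["node1.example.com"], and raises OpPrereqError for any name the
# configuration cannot expand.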

def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    instances: List of instances (strings) or None for all

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = lu.cfg.GetInstanceList()
  return utils.NiceSort(wanted)


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  Args:
    static: Static fields
    dynamic: Dynamic fields
    selected: Fields selected by the user

  """
  static_fields = frozenset(static)
  dynamic_fields = frozenset(dynamic)

  all_fields = static_fields | dynamic_fields

  if not all_fields.issuperset(selected):
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(frozenset(selected).
                                          difference(all_fields)))

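# Example (illustrative): with static=["name"] and dynamic=["mfree"],
# selecting ["name", "mfree"] passes silently, while ["name", "bogus"]
# raises OpPrereqError("Unknown output fields selected: bogus").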

def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics):
  """Builds instance related env variables for hooks from single variables.

  Args:
    secondary_nodes: List of secondary nodes as strings
  """
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, bridge, mac) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  return env

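# For illustration (all values made up), an instance with one NIC yields an
# environment like:
#   {"OP_TARGET": "inst1", "INSTANCE_NAME": "inst1",
#    "INSTANCE_PRIMARY": "node1", "INSTANCE_SECONDARIES": "node2",
#    "INSTANCE_OS_TYPE": "debian-etch", "INSTANCE_STATUS": "up",
#    "INSTANCE_MEMORY": 128, "INSTANCE_VCPUS": 1,
#    "INSTANCE_NIC0_IP": "", "INSTANCE_NIC0_BRIDGE": "xen-br0",
#    "INSTANCE_NIC0_HWADDR": "aa:00:00:00:00:01", "INSTANCE_NIC_COUNT": 1}
# The hooks runner will later prefix each key with "GANETI_".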

def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override
  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)


def _CheckInstanceBridgesExist(instance):
  """Check that the bridges needed by an instance exist.

  """
  # check bridges existence
  brlist = [nic.bridge for nic in instance.nics]
  if not rpc.call_bridges_exist(instance.primary_node, brlist):
    raise errors.OpPrereqError("one or more target bridges %s do not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))


class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.sstore.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.sstore.GetMasterNode()
    if not rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    return master


class LUVerifyCluster(LogicalUnit):
  """Verifies the cluster status.

  """
  HPATH = "cluster-verify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["skip_checks"]

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existence and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = utils.CheckVolumeGroupSize(vglist, self.cfg.GetVGName(),
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node ssh connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: ssh communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    if 'node-net-test' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node tcp connectivity data")
    else:
      if node_result['node-net-test']:
        bad = True
        nlist = utils.NiceSort(node_result['node-net-test'].keys())
        for node in nlist:
          feedback_fn("  - ERROR: tcp communication with node '%s': %s" %
                          (node, node_result['node-net-test'][node]))

    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad

  def _VerifyInstance(self, instance, instanceconfig, node_vol_is,
                      node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False

    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if instanceconfig.status != 'down':
      if (node_current not in node_instance or
          instance not in node_instance[node_current]):
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if node != node_current:
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def _VerifyNPlusOneMemory(self, node_info, instance_cfg, feedback_fn):
    """Verify N+1 Memory Resilience.

    Check that if one single node dies we can still start all the instances it
    was primary for.

    """
    bad = False

    for node, nodeinfo in node_info.iteritems():
      # This code checks that every node which is now listed as secondary has
      # enough memory to host all the instances it would have to take over,
      # should a single other node in the cluster fail.
      # FIXME: not ready for failover to an arbitrary node
      # FIXME: does not support file-backed instances
      # WARNING: we currently take into account down instances as well as up
      # ones, considering that even if they're down someone might want to start
      # them even in the event of a node failure.
      for prinode, instances in nodeinfo['sinst-by-pnode'].iteritems():
        needed_mem = 0
        for instance in instances:
          needed_mem += instance_cfg[instance].memory
        if nodeinfo['mfree'] < needed_mem:
          feedback_fn("  - ERROR: not enough memory on node %s to accomodate"
                      " failovers should node %s fail" % (node, prinode))
          bad = True
    return bad

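  # Worked example for the N+1 check (hypothetical numbers): if node2 has
  # mfree=1024 and is secondary for inst1 (memory=512) and inst2
  # (memory=768), both with node1 as primary, then node2's 'sinst-by-pnode'
  # is {'node1': [inst1, inst2]}; a node1 failure would need 1280 MB on
  # node2, so the cluster would be flagged as not N+1 compliant.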
  def CheckPrereq(self):
    """Check prerequisites.

    Transform the list of checks we're going to skip into a set and check that
    all its members are valid.

    """
    self.skip_set = frozenset(self.op.skip_checks)
    if not constants.VERIFY_OPTIONAL_CHECKS.issuperset(self.skip_set):
      raise errors.OpPrereqError("Invalid checks to be skipped specified")

  def BuildHooksEnv(self):
    """Build hooks env.

    Cluster-Verify hooks just run in the post phase and their failure makes
    the output be logged in the verify output and the verification to fail.

    """
    all_nodes = self.cfg.GetNodeList()
    # TODO: populate the environment with useful information for verify hooks
    env = {}
    return env, [], all_nodes

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    nodeinfo = [self.cfg.GetNodeInfo(nname) for nname in nodelist]
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    i_non_redundant = [] # Non-redundant instances
    node_volume = {}
    node_instance = {}
    node_info = {}
    instance_cfg = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = list(self.sstore.GetFileList())
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      'node-net-test': [(node.name, node.primary_ip, node.secondary_ip)
                        for node in nodeinfo]
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)
    all_ninfo = rpc.call_node_info(nodelist, self.cfg.GetVGName())

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if isinstance(volumeinfo, basestring):
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

      # node_info
      nodeinfo = all_ninfo[node]
      if not isinstance(nodeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      try:
        node_info[node] = {
          "mfree": int(nodeinfo['memory_free']),
          "dfree": int(nodeinfo['vg_free']),
          "pinst": [],
          "sinst": [],
          # dictionary holding all instances this node is secondary for,
          # grouped by their primary node. Each key is a cluster node, and each
          # value is a list of instances which have the key as primary and the
          # current node as secondary.  this is handy to calculate N+1 memory
          # availability if you can only failover from a primary to its
          # secondary.
          "sinst-by-pnode": {},
        }
      except ValueError:
        feedback_fn("  - ERROR: invalid value returned from node %s" % (node,))
        bad = True
        continue

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      inst_config = self.cfg.GetInstanceInfo(instance)
      result = self._VerifyInstance(instance, inst_config, node_volume,
                                    node_instance, feedback_fn)
      bad = bad or result

      inst_config.MapLVsByNode(node_vol_should)

      instance_cfg[instance] = inst_config

      pnode = inst_config.primary_node
      if pnode in node_info:
        node_info[pnode]['pinst'].append(instance)
      else:
        feedback_fn("  - ERROR: instance %s, connection to primary node"
                    " %s failed" % (instance, pnode))
        bad = True

      # If the instance is non-redundant we cannot survive losing its primary
      # node, so we are not N+1 compliant. On the other hand we have no disk
      # templates with more than one secondary so that situation is not well
      # supported either.
      # FIXME: does not support file-backed instances
      if len(inst_config.secondary_nodes) == 0:
        i_non_redundant.append(instance)
      elif len(inst_config.secondary_nodes) > 1:
        feedback_fn("  - WARNING: multiple secondaries for instance %s"
                    % instance)

      for snode in inst_config.secondary_nodes:
        if snode in node_info:
          node_info[snode]['sinst'].append(instance)
          if pnode not in node_info[snode]['sinst-by-pnode']:
            node_info[snode]['sinst-by-pnode'][pnode] = []
          node_info[snode]['sinst-by-pnode'][pnode].append(instance)
        else:
          feedback_fn("  - ERROR: instance %s, connection to secondary node"
                      " %s failed" % (instance, snode))

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    if constants.VERIFY_NPLUSONE_MEM not in self.skip_set:
      feedback_fn("* Verifying N+1 Memory redundancy")
      result = self._VerifyNPlusOneMemory(node_info, instance_cfg, feedback_fn)
      bad = bad or result

    feedback_fn("* Other Notes")
    if i_non_redundant:
      feedback_fn("  - NOTICE: %d non-redundant instance(s) found."
                  % len(i_non_redundant))

    return not bad

  def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
    """Analize the post-hooks' result, handle it, and send some
    nicely-formatted feedback back to the user.

    Args:
      phase: the hooks phase that has just been run
      hooks_results: the results of the multi-node hooks rpc call
      feedback_fn: function to send feedback back to the caller
      lu_result: previous Exec result

    """
    # We only really run POST phase hooks, and are only interested in
    # their results
    if phase == constants.HOOKS_PHASE_POST:
      # Used to change hooks' output to proper indentation
      indent_re = re.compile('^', re.M)
      feedback_fn("* Hooks Results")
      if not hooks_results:
        feedback_fn("  - ERROR: general communication failure")
        lu_result = 1
      else:
        for node_name in hooks_results:
          show_node_header = True
          res = hooks_results[node_name]
          if res is False or not isinstance(res, list):
            feedback_fn("    Communication failure")
            lu_result = 1
            continue
          for script, hkr, output in res:
            if hkr == constants.HKR_FAIL:
              # The node header is only shown once, if there are
              # failing hooks on that node
              if show_node_header:
                feedback_fn("  Node %s:" % node_name)
                show_node_header = False
              feedback_fn("    ERROR: Script %s failed, output:" % script)
              output = indent_re.sub('      ', output)
              feedback_fn("%s" % output)
              lu_result = 1

      return lu_result


class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    """
    # chained assignment: "result" is the 4-tuple whose elements are also
    # bound to individual names so they can be filled in below
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (inst.status != "up" or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = rpc.call_volume_list(nodes, vg_name)

    to_act = set()
    for node in nodes:
      # node_volume
      lvs = node_lvs[node]

      if isinstance(lvs, basestring):
        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
        res_nlvm[node] = lvs
      elif not isinstance(lvs, dict):
        logger.Info("connection to node %s failed or invalid data returned" %
                    (node,))
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result


class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]
  REQ_WSSTORE = True

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.sstore.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.sstore.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.sstore.GetClusterName()
    old_ip = self.sstore.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      if utils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip
    ss = self.sstore

    # shutdown the master IP
    master = ss.GetMasterNode()
    if not rpc.call_node_stop_master(master, False):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # modify the sstore
      ss.SetKey(ss.SS_MASTER_IP, ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)

      # Distribute updated ss config to all nodes
      myself = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      if myself.name in dist_nodes:
        dist_nodes.remove(myself.name)

      logger.Debug("Copying updated ssconf data to all nodes")
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
        fname = ss.KeyToFilename(keyname)
        result = rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          if not result[to_node]:
            logger.Error("copy of file %s to node %s failed" %
                         (fname, to_node))
    finally:
      if not rpc.call_node_start_master(master, False):
        logger.Error("Could not re-enable the master role on the master,"
                     " please restart manually.")


def _RecursiveCheckIfLVMBased(disk):
  """Check if the given disk or its children are lvm-based.

  Args:
    disk: ganeti.objects.Disk object

  Returns:
    boolean indicating whether a LD_LV dev_type was found or not

  """
  if disk.children:
    for chdisk in disk.children:
      if _RecursiveCheckIfLVMBased(chdisk):
        return True
  return disk.dev_type == constants.LD_LV

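# Example (illustrative): a mirrored disk whose children are two LD_LV
# volumes is LVM-based, so _RecursiveCheckIfLVMBased returns True for it;
# a disk with no children and a non-LV dev_type returns False.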

class LUSetClusterParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.sstore.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }
    mn = self.sstore.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    if not self.op.vg_name:
      instances = [self.cfg.GetInstanceInfo(name)
                   for name in self.cfg.GetInstanceList()]
      for inst in instances:
        for disk in inst.disks:
          if _RecursiveCheckIfLVMBased(disk):
            raise errors.OpPrereqError("Cannot disable lvm storage while"
                                       " lvm-based instances exist")

    # if vg_name not None, checks given volume group on all nodes
    if self.op.vg_name:
      node_list = self.cfg.GetNodeList()
      vglist = rpc.call_vg_list(node_list)
      for node in node_list:
        vgstatus = utils.CheckVolumeGroupSize(vglist[node], self.op.vg_name,
                                              constants.MIN_VG_SIZE)
        if vgstatus:
          raise errors.OpPrereqError("Error on node '%s': %s" %
                                     (node, vgstatus))

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    if self.op.vg_name != self.cfg.GetVGName():
      self.cfg.SetVGName(self.op.vg_name)
    else:
      feedback_fn("Cluster LVM configuration already in desired"
                  " state, not changing")


def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  """
  if not instance.disks:
    return True

  if not oneshot:
    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    cfgw.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if not rstats:
      proc.LogWarning("Can't get any data from node %s" % node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    retries = 0
    for i in range(len(rstats)):
      mstat = rstats[i]
      if mstat is None:
        proc.LogWarning("Can't compute data for node %s/%s" %
                        (node, instance.disks[i].iv_name))
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        proc.LogInfo("- device %s: %5.2f%% done, %s" %
                     (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    time.sleep(min(60, max_time))

  if done:
    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded

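# Typical use (a sketch, not lifted from this file): an LU waits for the
# disks of a newly-created instance to resync before continuing, e.g.:
#   disk_ok = _WaitForSync(self.cfg, instance, self.proc)
#   if not disk_ok:
#     raise errors.OpExecError("Disks are degraded after sync")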

def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  cfgw.SetDiskID(dev, node)
  # index into the status tuple returned by rpc.call_blockdev_find:
  # position 5 holds the overall is_degraded flag, position 6 the local
  # disk (ldisk) status
  if ldisk:
    idx = 6
  else:
    idx = 5

  result = True
  if on_primary or dev.AssembleOnSecondary():
    rstats = rpc.call_blockdev_find(node, dev)
    if not rstats:
      logger.ToStderr("Node %s: Disk degraded, not found or node down" % node)
      result = False
    else:
      result = result and (not rstats[idx])
  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)

  return result


class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.names:
      raise errors.OpPrereqError("Selective OS query not supported")

    self.dynamic_fields = frozenset(["name", "valid", "node_status"])
    _CheckOutputFields(static=[],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    # Lock all nodes, in shared mode
    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    """

  @staticmethod
  def _DiagnoseByOS(node_list, rlist):
    """Remaps a per-node return list into an a per-os per-node dictionary

      Args:
        node_list: a list with the names of all nodes
        rlist: a map with node names as keys and OS objects as values

      Returns:
        map: a map with osnames as keys and as value another map, with
             nodes as
             keys and list of OS objects as values
             e.g. {"debian-etch": {"node1": [<object>,...],
                                   "node2": [<object>,]}
                  }

    """
    all_os = {}
    for node_name, nr in rlist.iteritems():
      if not nr:
        continue
      for os_obj in nr:
        if os_obj.name not in all_os:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_os[os_obj.name] = {}
          for nname in node_list:
            all_os[os_obj.name][nname] = []
        all_os[os_obj.name][node_name].append(os_obj)
    return all_os

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.acquired_locks[locking.LEVEL_NODE]
    node_data = rpc.call_os_diagnose(node_list)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    pol = self._DiagnoseByOS(node_list, node_data)
    output = []
    for os_name, os_data in pol.iteritems():
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = os_name
        elif field == "valid":
          val = utils.all([osl and osl[0] for osl in os_data.values()])
        elif field == "node_status":
          val = {}
          for node_name, nos_list in os_data.iteritems():
            val[node_name] = [(v.status, v.path) for v in nos_list]
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output


class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would then be impossible to remove.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.sstore.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logger.Info("stopping the node daemon and removing configs from node %s" %
                node.name)

    self.context.RemoveNode(node.name)

    rpc.call_node_leave_cluster(node.name)


class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names"]
  REQ_BGL = False

  def ExpandNames(self):
    self.dynamic_fields = frozenset([
      "dtotal", "dfree",
      "mtotal", "mnode", "mfree",
      "bootid",
      "ctotal",
      ])

    _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
                               "pinst_list", "sinst_list",
                               "pip", "sip", "tags"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.needed_locks = {}
    self.share_locks[locking.LEVEL_NODE] = 1
    # TODO: we could lock nodes only if the user asked for dynamic fields. For
    # that we need atomic ways to get info for a group of nodes from the
    # config, though.
    if not self.op.names:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODE] = \
        _GetWantedNodes(self, self.op.names)

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # This of course is valid only if we locked the nodes
    self.wanted = self.acquired_locks[locking.LEVEL_NODE]

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.wanted
    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]

    # begin data gathering

    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
      for name in nodenames:
        nodeinfo = node_data.get(name, None)
        if nodeinfo:
          live_data[name] = {
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
            "ctotal": utils.TryConvert(int, nodeinfo['cpu_total']),
            "bootid": nodeinfo['bootid'],
            }
        else:
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "tags":
          val = list(node.GetTags())
        elif field in self.dynamic_fields:
          val = live_data[node.name].get(field, None)
        else: