#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the logic behind the cluster operations

This module implements the logic for doing operations in the cluster. There
are two kinds of classes defined:
  - logical units, which know how to deal with their specific opcode only
  - the processor, which dispatches the opcodes to their logical units

"""

import logging
import random
import time

from ganeti import opcodes
from ganeti import constants
from ganeti import errors
from ganeti import rpc
from ganeti import cmdlib
from ganeti import locking

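# A condensed sketch of how this module is typically driven (the context
# object and the opcode chosen here are assumptions for illustration; in
# practice the master daemon's job queue plays this role):
#
#   proc = Processor(context)               # context: a GanetiContext
#   op = opcodes.OpQueryClusterInfo()
#   result = proc.ExecOpCode(op, cbs=None)  # cbs: an OpExecCbBase or None

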
class _LockAcquireTimeout(Exception):
  """Internal exception to report timeouts on acquiring locks.

  """


def _CalculateLockAttemptTimeouts():
  """Calculate timeouts for lock attempts.

  """
  result = [1.0]

  # Wait for a total of at least 150s before doing a blocking acquire
  while sum(result) < 150.0:
    timeout = (result[-1] * 1.05) ** 1.25

    # Cap timeout at 10 seconds. This gives other jobs a chance to run
    # even if we're still trying to get our locks, before finally moving
    # to a blocking acquire.
    if timeout > 10.0:
      timeout = 10.0

    elif timeout < 0.1:
      # Lower boundary for safety
      timeout = 0.1

    result.append(timeout)

  return result
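

# For illustration, the timeout table computed above starts at 1.0 and
# grows slowly (roughly 1.0, 1.06, 1.15, 1.26, ...; the exact values
# depend on the formula), is capped at 10.0 per attempt, and adds up to
# at least 150 seconds:
#
#   >>> timeouts = _CalculateLockAttemptTimeouts()
#   >>> timeouts[0]
#   1.0
#   >>> max(timeouts)
#   10.0
#   >>> sum(timeouts) >= 150.0
#   True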


class _LockAttemptTimeoutStrategy(object):
  """Class with lock acquire timeout strategy.

  """
  __slots__ = [
    "_attempt",
    "_random_fn",
    "_start_time",
    "_time_fn",
    ]

  _TIMEOUT_PER_ATTEMPT = _CalculateLockAttemptTimeouts()

  def __init__(self, attempt=0, _time_fn=time.time, _random_fn=random.random):
    """Initializes this class.

    @type attempt: int
    @param attempt: Current attempt number
    @param _time_fn: Time function for unittests
    @param _random_fn: Random number generator for unittests

    """
    object.__init__(self)

    if attempt < 0:
      raise ValueError("Attempt must be zero or positive")

    self._attempt = attempt
    self._time_fn = _time_fn
    self._random_fn = _random_fn

    self._start_time = None

  def NextAttempt(self):
    """Returns the strategy for the next attempt.

    """
    return _LockAttemptTimeoutStrategy(attempt=self._attempt + 1,
                                       _time_fn=self._time_fn,
                                       _random_fn=self._random_fn)

  def CalcRemainingTimeout(self):
    """Returns the remaining timeout.

    """
    try:
      timeout = self._TIMEOUT_PER_ATTEMPT[self._attempt]
    except IndexError:
      # No more timeouts, do blocking acquire
      return None

    # Get start time on first calculation
    if self._start_time is None:
      self._start_time = self._time_fn()

    # Calculate remaining time for this attempt
    remaining_timeout = self._start_time + timeout - self._time_fn()

    # Add a small variation (-/+ 5%) to timeouts. This helps in situations
    # where two or more jobs are fighting for the same lock(s).
    variation_range = remaining_timeout * 0.1
    remaining_timeout += ((self._random_fn() * variation_range) -
                          (variation_range * 0.5))

    assert remaining_timeout >= 0.0, "Timeout must not be negative"

    return remaining_timeout
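

# Typical use, condensed from Processor.ExecOpCode below: retry with a
# per-attempt timeout, falling back to a blocking acquire (timeout None)
# once the table is exhausted; try_acquire is a hypothetical helper.
#
#   strategy = _LockAttemptTimeoutStrategy()
#   while True:
#     if try_acquire(timeout=strategy.CalcRemainingTimeout()):
#       break
#     strategy = strategy.NextAttempt()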


class OpExecCbBase:
  """Base class for OpCode execution callbacks.

  """
  def NotifyStart(self):
    """Called when we are about to execute the LU.

    This function is called when we're about to start the lu's Exec() method,
    that is, after we have acquired all locks.

    """

  def Feedback(self, *args):
    """Sends feedback from the LU code to the end-user.

    """

  def ReportLocks(self, msg):
    """Report lock operations.

    """


class Processor(object):
  """Object which runs OpCodes"""
  DISPATCH_TABLE = {
    # Cluster
    opcodes.OpPostInitCluster: cmdlib.LUPostInitCluster,
    opcodes.OpDestroyCluster: cmdlib.LUDestroyCluster,
    opcodes.OpQueryClusterInfo: cmdlib.LUQueryClusterInfo,
    opcodes.OpVerifyCluster: cmdlib.LUVerifyCluster,
    opcodes.OpQueryConfigValues: cmdlib.LUQueryConfigValues,
    opcodes.OpRenameCluster: cmdlib.LURenameCluster,
    opcodes.OpVerifyDisks: cmdlib.LUVerifyDisks,
    opcodes.OpSetClusterParams: cmdlib.LUSetClusterParams,
    opcodes.OpRedistributeConfig: cmdlib.LURedistributeConfig,
    opcodes.OpRepairDiskSizes: cmdlib.LURepairDiskSizes,
    # node lu
    opcodes.OpAddNode: cmdlib.LUAddNode,
    opcodes.OpQueryNodes: cmdlib.LUQueryNodes,
    opcodes.OpQueryNodeVolumes: cmdlib.LUQueryNodeVolumes,
    opcodes.OpQueryNodeStorage: cmdlib.LUQueryNodeStorage,
    opcodes.OpModifyNodeStorage: cmdlib.LUModifyNodeStorage,
    opcodes.OpRepairNodeStorage: cmdlib.LURepairNodeStorage,
    opcodes.OpRemoveNode: cmdlib.LURemoveNode,
    opcodes.OpSetNodeParams: cmdlib.LUSetNodeParams,
    opcodes.OpPowercycleNode: cmdlib.LUPowercycleNode,
    opcodes.OpEvacuateNode: cmdlib.LUEvacuateNode,
    opcodes.OpMigrateNode: cmdlib.LUMigrateNode,
    # instance lu
    opcodes.OpCreateInstance: cmdlib.LUCreateInstance,
    opcodes.OpReinstallInstance: cmdlib.LUReinstallInstance,
    opcodes.OpRemoveInstance: cmdlib.LURemoveInstance,
    opcodes.OpRenameInstance: cmdlib.LURenameInstance,
    opcodes.OpActivateInstanceDisks: cmdlib.LUActivateInstanceDisks,
    opcodes.OpShutdownInstance: cmdlib.LUShutdownInstance,
    opcodes.OpStartupInstance: cmdlib.LUStartupInstance,
    opcodes.OpRebootInstance: cmdlib.LURebootInstance,
    opcodes.OpDeactivateInstanceDisks: cmdlib.LUDeactivateInstanceDisks,
    opcodes.OpReplaceDisks: cmdlib.LUReplaceDisks,
    opcodes.OpRecreateInstanceDisks: cmdlib.LURecreateInstanceDisks,
    opcodes.OpFailoverInstance: cmdlib.LUFailoverInstance,
    opcodes.OpMigrateInstance: cmdlib.LUMigrateInstance,
    opcodes.OpMoveInstance: cmdlib.LUMoveInstance,
    opcodes.OpConnectConsole: cmdlib.LUConnectConsole,
    opcodes.OpQueryInstances: cmdlib.LUQueryInstances,
    opcodes.OpQueryInstanceData: cmdlib.LUQueryInstanceData,
    opcodes.OpSetInstanceParams: cmdlib.LUSetInstanceParams,
    opcodes.OpGrowDisk: cmdlib.LUGrowDisk,
    # os lu
    opcodes.OpDiagnoseOS: cmdlib.LUDiagnoseOS,
    # exports lu
    opcodes.OpQueryExports: cmdlib.LUQueryExports,
    opcodes.OpExportInstance: cmdlib.LUExportInstance,
    opcodes.OpRemoveExport: cmdlib.LURemoveExport,
    # tags lu
    opcodes.OpGetTags: cmdlib.LUGetTags,
    opcodes.OpSearchTags: cmdlib.LUSearchTags,
    opcodes.OpAddTags: cmdlib.LUAddTags,
    opcodes.OpDelTags: cmdlib.LUDelTags,
    # test lu
    opcodes.OpTestDelay: cmdlib.LUTestDelay,
    opcodes.OpTestAllocator: cmdlib.LUTestAllocator,
    }

  def __init__(self, context):
    """Constructor for Processor

    """
    self.context = context
    self._cbs = None
    self.rpc = rpc.RpcRunner(context.cfg)
    self.hmclass = HooksMaster

  def _ReportLocks(self, level, names, shared, timeout, acquired, result):
    """Reports lock operations.

    @type level: int
    @param level: Lock level
    @type names: list or string
    @param names: Lock names
    @type shared: bool
    @param shared: Whether the locks should be acquired in shared mode
    @type timeout: None or float
    @param timeout: Timeout for acquiring the locks
    @type acquired: bool
    @param acquired: Whether the locks have already been acquired
    @type result: None or set
    @param result: Result from L{locking.GanetiLockManager.acquire}

    """
    parts = []

    # Build message
    if acquired:
      if result is None:
        parts.append("timeout")
      else:
        parts.append("acquired")
    else:
      parts.append("waiting")

      if timeout is None:
        parts.append("blocking")
      else:
        parts.append("timeout=%0.6fs" % timeout)

    parts.append(locking.LEVEL_NAMES[level])

    if names == locking.ALL_SET:
      parts.append("ALL")
    elif isinstance(names, basestring):
      parts.append(names)
    else:
      parts.append(",".join(names))

    if shared:
      parts.append("shared")
    else:
      parts.append("exclusive")

    msg = "/".join(parts)

    logging.debug("LU locks %s", msg)

    if self._cbs:
      self._cbs.ReportLocks(msg)

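  # For a sense of the format: the message assembled by _ReportLocks above
  # is a short "/"-joined string; invented examples (actual level names
  # come from locking.LEVEL_NAMES):
  #
  #   waiting/timeout=0.950000s/instance/inst1.example.com/shared
  #   acquired/cluster/BGL/exclusive
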
  def _AcquireLocks(self, level, names, shared, timeout):
    """Acquires locks via the Ganeti lock manager.

    @type level: int
    @param level: Lock level
    @type names: list or string
    @param names: Lock names
    @type shared: bool
    @param shared: Whether the locks should be acquired in shared mode
    @type timeout: None or float
    @param timeout: Timeout for acquiring the locks

    """
    self._ReportLocks(level, names, shared, timeout, False, None)

    acquired = self.context.glm.acquire(level, names, shared=shared,
                                        timeout=timeout)

    self._ReportLocks(level, names, shared, timeout, True, acquired)

    return acquired

  def _ExecLU(self, lu):
    """Logical Unit execution sequence.

    """
    write_count = self.context.cfg.write_count
    lu.CheckPrereq()
    hm = HooksMaster(self.rpc.call_hooks_runner, lu)
    h_results = hm.RunPhase(constants.HOOKS_PHASE_PRE)
    lu.HooksCallBack(constants.HOOKS_PHASE_PRE, h_results,
                     self._Feedback, None)

    if getattr(lu.op, "dry_run", False):
      # in this mode, no post-hooks are run, and the config is not
      # written (as it might have been modified by another LU, and we
      # shouldn't do writeout on behalf of other threads)
      self.LogInfo("dry-run mode requested, not actually executing"
                   " the operation")
      return lu.dry_run_result

    try:
      result = lu.Exec(self._Feedback)
      h_results = hm.RunPhase(constants.HOOKS_PHASE_POST)
      result = lu.HooksCallBack(constants.HOOKS_PHASE_POST, h_results,
                                self._Feedback, result)
    finally:
      # FIXME: This needs locks if not lu_class.REQ_BGL
      if write_count != self.context.cfg.write_count:
        hm.RunConfigUpdate()

    return result

  def _LockAndExecLU(self, lu, level, calc_timeout):
    """Execute a Logical Unit, with the needed locks.

    This is a recursive function that starts locking the given level, and
    proceeds up until there are no more locks to acquire. Then it executes the
    given LU and its opcodes.

    """
    adding_locks = level in lu.add_locks
    acquiring_locks = level in lu.needed_locks
    if level not in locking.LEVELS:
      if self._cbs:
        self._cbs.NotifyStart()

      result = self._ExecLU(lu)

    elif adding_locks and acquiring_locks:
      # We could both acquire and add locks at the same level, but for now we
      # don't need this, so we'll avoid the complicated code needed.
      raise NotImplementedError("Can't declare locks to acquire when adding"
                                " others")

    elif adding_locks or acquiring_locks:
      lu.DeclareLocks(level)
      share = lu.share_locks[level]

      try:
        assert adding_locks ^ acquiring_locks, \
          "Locks must be either added or acquired"

        if acquiring_locks:
          # Acquiring locks
          needed_locks = lu.needed_locks[level]

          acquired = self._AcquireLocks(level, needed_locks, share,
                                        calc_timeout())

          if acquired is None:
            raise _LockAcquireTimeout()

          lu.acquired_locks[level] = acquired

        else:
          # Adding locks
          add_locks = lu.add_locks[level]
          lu.remove_locks[level] = add_locks

          try:
            self.context.glm.add(level, add_locks, acquired=1, shared=share)
          except errors.LockError:
            raise errors.OpPrereqError(
              "Couldn't add locks (%s), probably because of a race condition"
              " with another job, which added them first" % add_locks)

          lu.acquired_locks[level] = add_locks

        try:
          result = self._LockAndExecLU(lu, level + 1, calc_timeout)
        finally:
          if level in lu.remove_locks:
            self.context.glm.remove(level, lu.remove_locks[level])
      finally:
        if self.context.glm.is_owned(level):
          self.context.glm.release(level)

    else:
      result = self._LockAndExecLU(lu, level + 1, calc_timeout)

    return result

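  # For orientation: _LockAndExecLU walks the lock levels in increasing
  # order, starting at locking.LEVEL_INSTANCE (ExecOpCode below takes the
  # cluster-level BGL first), so a full descent looks roughly like
  #
  #   LEVEL_INSTANCE -> LEVEL_NODE -> ... -> _ExecLU()
  #
  # with each level's locks held until the inner call returns.
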
  def ExecOpCode(self, op, cbs):
    """Execute an opcode.

    @type op: an OpCode instance
    @param op: the opcode to be executed
    @type cbs: L{OpExecCbBase}
    @param cbs: Runtime callbacks

    """
    if not isinstance(op, opcodes.OpCode):
      raise errors.ProgrammerError("Non-opcode instance passed"
                                   " to ExecOpCode")

    self._cbs = cbs
    try:
      lu_class = self.DISPATCH_TABLE.get(op.__class__, None)
      if lu_class is None:
        raise errors.OpCodeUnknown("Unknown opcode")

      timeout_strategy = _LockAttemptTimeoutStrategy()

      while True:
        try:
          acquire_timeout = timeout_strategy.CalcRemainingTimeout()

          # Acquire the Big Ganeti Lock exclusively if this LU requires it,
          # and in a shared fashion otherwise (to prevent concurrent run with
          # an exclusive LU).
          if self._AcquireLocks(locking.LEVEL_CLUSTER, locking.BGL,
                                not lu_class.REQ_BGL, acquire_timeout) is None:
            raise _LockAcquireTimeout()

          try:
            lu = lu_class(self, op, self.context, self.rpc)
            lu.ExpandNames()
            assert lu.needed_locks is not None, "needed_locks not set by LU"

            return self._LockAndExecLU(lu, locking.LEVEL_INSTANCE,
                                       timeout_strategy.CalcRemainingTimeout)
          finally:
            self.context.glm.release(locking.LEVEL_CLUSTER)

        except _LockAcquireTimeout:
          # Timeout while waiting for lock, try again
          pass

        timeout_strategy = timeout_strategy.NextAttempt()

    finally:
      self._cbs = None

  def _Feedback(self, *args):
    """Forward call to feedback callback function.

    """
    if self._cbs:
      self._cbs.Feedback(*args)

  def LogStep(self, current, total, message):
    """Log a change in LU execution progress.

    """
    logging.debug("Step %d/%d %s", current, total, message)
    self._Feedback("STEP %d/%d %s" % (current, total, message))

  def LogWarning(self, message, *args, **kwargs):
    """Log a warning to the logs and the user.

    The optional keyword argument is 'hint' and can be used to show a
    hint to the user (presumably related to the warning). If the
    message is empty, it will not be printed at all, allowing one to
    show only a hint.

    """
    assert not kwargs or (len(kwargs) == 1 and "hint" in kwargs), \
           "Invalid keyword arguments for LogWarning (%s)" % str(kwargs)
    if args:
      message = message % tuple(args)
    if message:
      logging.warning(message)
      self._Feedback(" - WARNING: %s" % message)
    if "hint" in kwargs:
      self._Feedback("      Hint: %s" % kwargs["hint"])

  def LogInfo(self, message, *args):
    """Log an informational message to the logs and the user.

    """
    if args:
      message = message % tuple(args)
    logging.info(message)
    self._Feedback(" - INFO: %s" % message)


class HooksMaster(object):
  """Hooks master.

  This class distributes the run commands to the nodes based on the
  specific LU class.

  In order to remove the direct dependency on the rpc module, the
  constructor needs a function which actually does the remote
  call. This will usually be rpc.call_hooks_runner, but any function
  which behaves the same works.

  """
  def __init__(self, callfn, lu):
    self.callfn = callfn
    self.lu = lu
    self.op = lu.op
    self.env, node_list_pre, node_list_post = self._BuildEnv()
    self.node_list = {constants.HOOKS_PHASE_PRE: node_list_pre,
                      constants.HOOKS_PHASE_POST: node_list_post}

  def _BuildEnv(self):
    """Compute the environment and the target nodes.

    Based on the opcode and the current node list, this builds the
    environment for the hooks and the target node list for the run.

    """
    env = {
      "PATH": "/sbin:/bin:/usr/sbin:/usr/bin",
      "GANETI_HOOKS_VERSION": constants.HOOKS_VERSION,
      "GANETI_OP_CODE": self.op.OP_ID,
      "GANETI_OBJECT_TYPE": self.lu.HTYPE,
      "GANETI_DATA_DIR": constants.DATA_DIR,
      }

    if self.lu.HPATH is not None:
      lu_env, lu_nodes_pre, lu_nodes_post = self.lu.BuildHooksEnv()
      if lu_env:
        for key in lu_env:
          env["GANETI_" + key] = lu_env[key]
    else:
      lu_nodes_pre = lu_nodes_post = []

    return env, frozenset(lu_nodes_pre), frozenset(lu_nodes_post)
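
  # The environment built above is a flat mapping whose values are
  # stringified in _RunWrapper below; a sketch with invented values:
  #
  #   {"PATH": "/sbin:/bin:/usr/sbin:/usr/bin",
  #    "GANETI_HOOKS_VERSION": "2",
  #    "GANETI_OP_CODE": "OP_INSTANCE_STARTUP",
  #    "GANETI_OBJECT_TYPE": "INSTANCE",
  #    "GANETI_DATA_DIR": "/var/lib/ganeti",
  #    "GANETI_INSTANCE_NAME": "inst1.example.com"}   # from BuildHooksEnv()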

  def _RunWrapper(self, node_list, hpath, phase):
    """Simple wrapper over self.callfn.

    This method fixes the environment before doing the rpc call.

    """
    env = self.env.copy()
    env["GANETI_HOOKS_PHASE"] = phase
    env["GANETI_HOOKS_PATH"] = hpath
    if self.lu.cfg is not None:
      env["GANETI_CLUSTER"] = self.lu.cfg.GetClusterName()
      env["GANETI_MASTER"] = self.lu.cfg.GetMasterNode()

    env = dict([(str(key), str(val)) for key, val in env.iteritems()])

    return self.callfn(node_list, hpath, phase, env)

  def RunPhase(self, phase, nodes=None):
    """Run all the scripts for a phase.

    This is the main function of the HooksMaster.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param nodes: overrides the predefined list of nodes for the given phase
    @return: the processed results of the hooks multi-node rpc call
    @raise errors.HooksFailure: on communication failure to the nodes
    @raise errors.HooksAbort: on failure of one of the hooks

    """
    if not self.node_list[phase] and not nodes:
      # empty node list, we should not attempt to run this as either
      # we're in the cluster init phase and the rpc client part can't
      # even attempt to run, or this LU doesn't do hooks at all
      return
    hpath = self.lu.HPATH
    if nodes is not None:
      results = self._RunWrapper(nodes, hpath, phase)
    else:
      results = self._RunWrapper(self.node_list[phase], hpath, phase)
    errs = []
    if not results:
      msg = "Communication Failure"
      if phase == constants.HOOKS_PHASE_PRE:
        raise errors.HooksFailure(msg)
      else:
        self.lu.LogWarning(msg)
        return results
    for node_name in results:
      res = results[node_name]
      if res.offline:
        continue
      msg = res.fail_msg
      if msg:
        self.lu.LogWarning("Communication failure to node %s: %s",
                           node_name, msg)
        continue
      for script, hkr, output in res.payload:
        if hkr == constants.HKR_FAIL:
          if phase == constants.HOOKS_PHASE_PRE:
            errs.append((node_name, script, output))
          else:
            if not output:
              output = "(no output)"
            self.lu.LogWarning("On %s script %s failed, output: %s" %
                               (node_name, script, output))
    if errs and phase == constants.HOOKS_PHASE_PRE:
      raise errors.HooksAbort(errs)
    return results

  def RunConfigUpdate(self):
    """Run the special configuration update hook

    This is a special hook that runs only on the master after each
    top-level LU if the configuration has been updated.

    """
    phase = constants.HOOKS_PHASE_POST
    hpath = constants.HOOKS_NAME_CFGUPDATE
    nodes = [self.lu.cfg.GetMasterNode()]
    self._RunWrapper(nodes, hpath, phase)