#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the logic behind the cluster operations

This module implements the logic for doing operations in the cluster. There
are two kinds of classes defined:
  - logical units, which know how to deal with their specific opcode only
  - the processor, which dispatches the opcodes to their logical units

"""

import logging
import random
import time

from ganeti import opcodes
from ganeti import constants
from ganeti import errors
from ganeti import rpc
from ganeti import cmdlib
from ganeti import locking
from ganeti import utils


class LockAcquireTimeout(Exception):
  """Exception to report timeouts on acquiring locks.

  """


def _CalculateLockAttemptTimeouts():
  """Calculate timeouts for lock attempts.

  """
  result = [1.0]

  # Wait for a total of at least 150s before doing a blocking acquire
  while sum(result) < 150.0:
    timeout = (result[-1] * 1.05) ** 1.25

    # Cap timeout at 10 seconds. This gives other jobs a chance to run
    # even if we're still trying to get our locks, before finally moving
    # to a blocking acquire.
    if timeout > 10.0:
      timeout = 10.0

    elif timeout < 0.1:
      # Lower boundary for safety
      timeout = 0.1

    result.append(timeout)

  return result
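
# For illustration: the series computed above grows roughly as 1.0, 1.06,
# 1.15, 1.26, 1.42, 1.65, 1.99, 2.51, 3.36, 4.83, 7.61 and then stays
# capped at 10.0 until the 150s total is reached, giving about two dozen
# timed attempts before the final blocking acquire (values approximate).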


class LockAttemptTimeoutStrategy(object):
  """Class with lock acquire timeout strategy.

  """
  __slots__ = [
    "_timeouts",
    "_random_fn",
    "_time_fn",
    ]

  _TIMEOUT_PER_ATTEMPT = _CalculateLockAttemptTimeouts()

  def __init__(self, _time_fn=time.time, _random_fn=random.random):
    """Initializes this class.

    @param _time_fn: Time function for unittests
    @param _random_fn: Random number generator for unittests

    """
    object.__init__(self)

    self._timeouts = iter(self._TIMEOUT_PER_ATTEMPT)
    self._time_fn = _time_fn
    self._random_fn = _random_fn

  def NextAttempt(self):
    """Returns the timeout for the next attempt.

    """
    try:
      timeout = self._timeouts.next()
    except StopIteration:
      # No more timeouts, do blocking acquire
      timeout = None

    if timeout is not None:
      # Add a small variation (-/+ 5%) to timeout. This helps in situations
      # where two or more jobs are fighting for the same lock(s).
      variation_range = timeout * 0.1
      timeout += ((self._random_fn() * variation_range) -
                  (variation_range * 0.5))

    return timeout
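
# Illustrative use (a sketch, not part of the original module):
#
#   strategy = LockAttemptTimeoutStrategy()
#   timeout = strategy.NextAttempt()  # a float at first; None once the
#                                     # series is exhausted (block forever)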


class OpExecCbBase: # pylint: disable-msg=W0232
  """Base class for OpCode execution callbacks.

  """
  def NotifyStart(self):
    """Called when we are about to execute the LU.

    This function is called when we're about to start the lu's Exec() method,
    that is, after we have acquired all locks.

    """

  def Feedback(self, *args):
    """Sends feedback from the LU code to the end-user.

    """

  def CheckCancel(self):
    """Check whether job has been cancelled.

    """


class Processor(object):
  """Object which runs OpCodes"""
  DISPATCH_TABLE = {
    # Cluster
    opcodes.OpPostInitCluster: cmdlib.LUPostInitCluster,
    opcodes.OpDestroyCluster: cmdlib.LUDestroyCluster,
    opcodes.OpQueryClusterInfo: cmdlib.LUQueryClusterInfo,
    opcodes.OpVerifyCluster: cmdlib.LUVerifyCluster,
    opcodes.OpQueryConfigValues: cmdlib.LUQueryConfigValues,
    opcodes.OpRenameCluster: cmdlib.LURenameCluster,
    opcodes.OpVerifyDisks: cmdlib.LUVerifyDisks,
    opcodes.OpSetClusterParams: cmdlib.LUSetClusterParams,
    opcodes.OpRedistributeConfig: cmdlib.LURedistributeConfig,
    opcodes.OpRepairDiskSizes: cmdlib.LURepairDiskSizes,
    opcodes.OpQuery: cmdlib.LUQuery,
    opcodes.OpQueryFields: cmdlib.LUQueryFields,
    # node lu
    opcodes.OpAddNode: cmdlib.LUAddNode,
    opcodes.OpQueryNodes: cmdlib.LUQueryNodes,
    opcodes.OpQueryNodeVolumes: cmdlib.LUQueryNodeVolumes,
    opcodes.OpQueryNodeStorage: cmdlib.LUQueryNodeStorage,
    opcodes.OpModifyNodeStorage: cmdlib.LUModifyNodeStorage,
    opcodes.OpRepairNodeStorage: cmdlib.LURepairNodeStorage,
    opcodes.OpRemoveNode: cmdlib.LURemoveNode,
    opcodes.OpSetNodeParams: cmdlib.LUSetNodeParams,
    opcodes.OpPowercycleNode: cmdlib.LUPowercycleNode,
    opcodes.OpMigrateNode: cmdlib.LUMigrateNode,
    opcodes.OpNodeEvacuationStrategy: cmdlib.LUNodeEvacuationStrategy,
    # instance lu
    opcodes.OpCreateInstance: cmdlib.LUCreateInstance,
    opcodes.OpReinstallInstance: cmdlib.LUReinstallInstance,
    opcodes.OpRemoveInstance: cmdlib.LURemoveInstance,
    opcodes.OpRenameInstance: cmdlib.LURenameInstance,
    opcodes.OpActivateInstanceDisks: cmdlib.LUActivateInstanceDisks,
    opcodes.OpShutdownInstance: cmdlib.LUShutdownInstance,
    opcodes.OpStartupInstance: cmdlib.LUStartupInstance,
    opcodes.OpRebootInstance: cmdlib.LURebootInstance,
    opcodes.OpDeactivateInstanceDisks: cmdlib.LUDeactivateInstanceDisks,
    opcodes.OpReplaceDisks: cmdlib.LUReplaceDisks,
    opcodes.OpRecreateInstanceDisks: cmdlib.LURecreateInstanceDisks,
    opcodes.OpFailoverInstance: cmdlib.LUFailoverInstance,
    opcodes.OpMigrateInstance: cmdlib.LUMigrateInstance,
    opcodes.OpMoveInstance: cmdlib.LUMoveInstance,
    opcodes.OpConnectConsole: cmdlib.LUConnectConsole,
    opcodes.OpQueryInstances: cmdlib.LUQueryInstances,
    opcodes.OpQueryInstanceData: cmdlib.LUQueryInstanceData,
    opcodes.OpSetInstanceParams: cmdlib.LUSetInstanceParams,
    opcodes.OpGrowDisk: cmdlib.LUGrowDisk,
    # node group lu
    opcodes.OpAddGroup: cmdlib.LUAddGroup,
    opcodes.OpQueryGroups: cmdlib.LUQueryGroups,
    opcodes.OpSetGroupParams: cmdlib.LUSetGroupParams,
    opcodes.OpRemoveGroup: cmdlib.LURemoveGroup,
    opcodes.OpRenameGroup: cmdlib.LURenameGroup,
    # os lu
    opcodes.OpDiagnoseOS: cmdlib.LUDiagnoseOS,
    # exports lu
    opcodes.OpQueryExports: cmdlib.LUQueryExports,
    opcodes.OpPrepareExport: cmdlib.LUPrepareExport,
    opcodes.OpExportInstance: cmdlib.LUExportInstance,
    opcodes.OpRemoveExport: cmdlib.LURemoveExport,
    # tags lu
    opcodes.OpGetTags: cmdlib.LUGetTags,
    opcodes.OpSearchTags: cmdlib.LUSearchTags,
    opcodes.OpAddTags: cmdlib.LUAddTags,
    opcodes.OpDelTags: cmdlib.LUDelTags,
    # test lu
    opcodes.OpTestDelay: cmdlib.LUTestDelay,
    opcodes.OpTestAllocator: cmdlib.LUTestAllocator,
    opcodes.OpTestJobqueue: cmdlib.LUTestJobqueue,
    # OOB lu
    opcodes.OpOobCommand: cmdlib.LUOobCommand,
    }

  def __init__(self, context, ec_id):
    """Constructor for Processor

    @type context: GanetiContext
    @param context: global Ganeti context
    @type ec_id: string
    @param ec_id: execution context identifier

    """
    self.context = context
    self._ec_id = ec_id
    self._cbs = None
    self.rpc = rpc.RpcRunner(context.cfg)
    self.hmclass = HooksMaster

  def _AcquireLocks(self, level, names, shared, timeout, priority):
    """Acquires locks via the Ganeti lock manager.

    @type level: int
    @param level: Lock level
    @type names: list or string
    @param names: Lock names
    @type shared: bool
    @param shared: Whether the locks should be acquired in shared mode
    @type timeout: None or float
    @param timeout: Timeout for acquiring the locks
    @raise LockAcquireTimeout: In case locks couldn't be acquired in specified
        amount of time

    """
    if self._cbs:
      self._cbs.CheckCancel()

    acquired = self.context.glm.acquire(level, names, shared=shared,
                                        timeout=timeout, priority=priority)

    if acquired is None:
      raise LockAcquireTimeout()

    return acquired

  def _ExecLU(self, lu):
    """Logical Unit execution sequence.

    """
    write_count = self.context.cfg.write_count
    lu.CheckPrereq()
    hm = HooksMaster(self.rpc.call_hooks_runner, lu)
    h_results = hm.RunPhase(constants.HOOKS_PHASE_PRE)
    lu.HooksCallBack(constants.HOOKS_PHASE_PRE, h_results,
                     self.Log, None)

    if getattr(lu.op, "dry_run", False):
      # in this mode, no post-hooks are run, and the config is not
      # written (as it might have been modified by another LU, and we
      # shouldn't do writeout on behalf of other threads)
      self.LogInfo("dry-run mode requested, not actually executing"
                   " the operation")
      return lu.dry_run_result

    try:
      result = lu.Exec(self.Log)
      h_results = hm.RunPhase(constants.HOOKS_PHASE_POST)
      result = lu.HooksCallBack(constants.HOOKS_PHASE_POST, h_results,
                                self.Log, result)
    finally:
      # FIXME: This needs locks if not lu_class.REQ_BGL
      if write_count != self.context.cfg.write_count:
        hm.RunConfigUpdate()

    return result

  def _LockAndExecLU(self, lu, level, calc_timeout, priority):
    """Execute a Logical Unit, with the needed locks.

    This is a recursive function that starts locking at the given level and
    proceeds up until there are no more locks to acquire. It then executes
    the given LU and its opcodes.

    """
    adding_locks = level in lu.add_locks
    acquiring_locks = level in lu.needed_locks
    if level not in locking.LEVELS:
      if self._cbs:
        self._cbs.NotifyStart()

      result = self._ExecLU(lu)

    elif adding_locks and acquiring_locks:
      # We could both acquire and add locks at the same level, but for now we
      # don't need this, so we'll avoid the complicated code needed.
      raise NotImplementedError("Can't declare locks to acquire when adding"
                                " others")

    elif adding_locks or acquiring_locks:
      lu.DeclareLocks(level)
      share = lu.share_locks[level]

      try:
        assert adding_locks ^ acquiring_locks, \
          "Locks must be either added or acquired"

        if acquiring_locks:
          # Acquiring locks
          needed_locks = lu.needed_locks[level]

          acquired = self._AcquireLocks(level, needed_locks, share,
                                        calc_timeout(), priority)
        else:
          # Adding locks
          add_locks = lu.add_locks[level]
          lu.remove_locks[level] = add_locks

          try:
            self.context.glm.add(level, add_locks, acquired=1, shared=share)
          except errors.LockError:
            raise errors.OpPrereqError(
              "Couldn't add locks (%s), probably because of a race condition"
              " with another job, who added them first" % add_locks,
              errors.ECODE_FAULT)

          acquired = add_locks

        try:
          lu.acquired_locks[level] = acquired

          result = self._LockAndExecLU(lu, level + 1, calc_timeout, priority)
        finally:
          if level in lu.remove_locks:
            self.context.glm.remove(level, lu.remove_locks[level])
      finally:
        if self.context.glm.is_owned(level):
          self.context.glm.release(level)

    else:
      result = self._LockAndExecLU(lu, level + 1, calc_timeout, priority)

    return result

  def ExecOpCode(self, op, cbs, timeout=None, priority=None):
    """Execute an opcode.

    @type op: an OpCode instance
    @param op: the opcode to be executed
    @type cbs: L{OpExecCbBase}
    @param cbs: Runtime callbacks
    @type timeout: float or None
    @param timeout: Maximum time to acquire all locks, None for no timeout
    @type priority: number or None
    @param priority: Priority for acquiring lock(s)
    @raise LockAcquireTimeout: In case locks couldn't be acquired in specified
        amount of time
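
    Illustrative call (names other than ExecOpCode are placeholders):
      proc.ExecOpCode(op, cbs, timeout=10.0, priority=0)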

    """
    if not isinstance(op, opcodes.OpCode):
      raise errors.ProgrammerError("Non-opcode instance passed"
                                   " to ExecOpcode")

    lu_class = self.DISPATCH_TABLE.get(op.__class__, None)
    if lu_class is None:
      raise errors.OpCodeUnknown("Unknown opcode")

    if timeout is None:
      calc_timeout = lambda: None
    else:
      calc_timeout = utils.RunningTimeout(timeout, False).Remaining

    self._cbs = cbs
    try:
      # Acquire the Big Ganeti Lock exclusively if this LU requires it,
      # and in a shared fashion otherwise (to prevent concurrent run with
      # an exclusive LU).
      self._AcquireLocks(locking.LEVEL_CLUSTER, locking.BGL,
                          not lu_class.REQ_BGL, calc_timeout(),
                          priority)
      try:
        lu = lu_class(self, op, self.context, self.rpc)
        lu.ExpandNames()
        assert lu.needed_locks is not None, "needed_locks not set by LU"

        try:
          return self._LockAndExecLU(lu, locking.LEVEL_INSTANCE, calc_timeout,
                                     priority)
        finally:
          if self._ec_id:
            self.context.cfg.DropECReservations(self._ec_id)
      finally:
        self.context.glm.release(locking.LEVEL_CLUSTER)
    finally:
      self._cbs = None

  def Log(self, *args):
    """Forward call to feedback callback function.

    """
    if self._cbs:
      self._cbs.Feedback(*args)

  def LogStep(self, current, total, message):
    """Log a change in LU execution progress.

    """
    logging.debug("Step %d/%d %s", current, total, message)
    self.Log("STEP %d/%d %s" % (current, total, message))

  def LogWarning(self, message, *args, **kwargs):
    """Log a warning to the logs and the user.

    The optional keyword argument is 'hint' and can be used to show a
    hint to the user (presumably related to the warning). If the
    message is empty, it will not be printed at all, allowing one to
    show only a hint.
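
    Illustrative call (message and hint are made up):
      proc.LogWarning("Can't contact node %s", node_name,
                      hint="check the network connection")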

    """
    assert not kwargs or (len(kwargs) == 1 and "hint" in kwargs), \
           "Invalid keyword arguments for LogWarning (%s)" % str(kwargs)
    if args:
      message = message % tuple(args)
    if message:
      logging.warning(message)
      self.Log(" - WARNING: %s" % message)
    if "hint" in kwargs:
      self.Log("      Hint: %s" % kwargs["hint"])

  def LogInfo(self, message, *args):
    """Log an informational message to the logs and the user.

    """
    if args:
      message = message % tuple(args)
    logging.info(message)
    self.Log(" - INFO: %s" % message)

  def GetECId(self):
    if not self._ec_id:
      raise errors.ProgrammerError("Tried to use execution context id"
                                   " when not set")
    return self._ec_id


class HooksMaster(object):
  """Hooks master.

  This class distributes the run commands to the nodes based on the
  specific LU class.

  In order to remove the direct dependency on the rpc module, the
  constructor needs a function which actually does the remote
  call. This will usually be rpc.call_hooks_runner, but any function
  which behaves the same works.

  """
  def __init__(self, callfn, lu):
    self.callfn = callfn
    self.lu = lu
    self.op = lu.op
    self.env, node_list_pre, node_list_post = self._BuildEnv()
    self.node_list = {constants.HOOKS_PHASE_PRE: node_list_pre,
                      constants.HOOKS_PHASE_POST: node_list_post}

  def _BuildEnv(self):
    """Compute the environment and the target nodes.

    Based on the opcode and the current node list, this builds the
    environment for the hooks and the target node list for the run.

    """
    env = {
      "PATH": "/sbin:/bin:/usr/sbin:/usr/bin",
      "GANETI_HOOKS_VERSION": constants.HOOKS_VERSION,
      "GANETI_OP_CODE": self.op.OP_ID,
      "GANETI_OBJECT_TYPE": self.lu.HTYPE,
      "GANETI_DATA_DIR": constants.DATA_DIR,
      }

    if self.lu.HPATH is not None:
      lu_env, lu_nodes_pre, lu_nodes_post = self.lu.BuildHooksEnv()
      if lu_env:
        for key in lu_env:
          env["GANETI_" + key] = lu_env[key]
    else:
      lu_nodes_pre = lu_nodes_post = []

    return env, frozenset(lu_nodes_pre), frozenset(lu_nodes_post)

  def _RunWrapper(self, node_list, hpath, phase):
    """Simple wrapper over self.callfn.

    This method fixes the environment before doing the rpc call.

    """
    env = self.env.copy()
    env["GANETI_HOOKS_PHASE"] = phase
    env["GANETI_HOOKS_PATH"] = hpath
    if self.lu.cfg is not None:
      env["GANETI_CLUSTER"] = self.lu.cfg.GetClusterName()
      env["GANETI_MASTER"] = self.lu.cfg.GetMasterNode()

    env = dict([(str(key), str(val)) for key, val in env.iteritems()])

    return self.callfn(node_list, hpath, phase, env)

  def RunPhase(self, phase, nodes=None):
    """Run all the scripts for a phase.

    This is the main function of the HooksMaster.

    @param phase: one of L{constants.HOOKS_PHASE_POST} or
        L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
    @param nodes: overrides the predefined list of nodes for the given phase
    @return: the processed results of the hooks multi-node rpc call
    @raise errors.HooksFailure: on communication failure to the nodes
    @raise errors.HooksAbort: on failure of one of the hooks

    """
    if not self.node_list[phase] and not nodes:
      # Empty node list: we should not attempt to run this, as either
      # we're in the cluster-init phase and the rpc client part can't
      # even attempt to run, or this LU doesn't do hooks at all
      return
    hpath = self.lu.HPATH
    if nodes is not None:
      results = self._RunWrapper(nodes, hpath, phase)
    else:
      results = self._RunWrapper(self.node_list[phase], hpath, phase)
    errs = []
    if not results:
      msg = "Communication Failure"
      if phase == constants.HOOKS_PHASE_PRE:
        raise errors.HooksFailure(msg)
      else:
        self.lu.LogWarning(msg)
        return results
    for node_name in results:
      res = results[node_name]
      if res.offline:
        continue
      msg = res.fail_msg
      if msg:
        self.lu.LogWarning("Communication failure to node %s: %s",
                           node_name, msg)
        continue
      for script, hkr, output in res.payload:
        if hkr == constants.HKR_FAIL:
          if phase == constants.HOOKS_PHASE_PRE:
            errs.append((node_name, script, output))
          else:
            if not output:
              output = "(no output)"
            self.lu.LogWarning("On %s script %s failed, output: %s" %
                               (node_name, script, output))
    if errs and phase == constants.HOOKS_PHASE_PRE:
      raise errors.HooksAbort(errs)
    return results

  def RunConfigUpdate(self):
    """Run the special configuration update hook

    This is a special hook that runs only on the master after each
    top-level LI if the configuration has been updated.

    """
    phase = constants.HOOKS_PHASE_POST
    hpath = constants.HOOKS_NAME_CFGUPDATE
    nodes = [self.lu.cfg.GetMasterNode()]
    self._RunWrapper(nodes, hpath, phase)