#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the job queue handling.

Locking: there's a single, large lock in the L{JobQueue} class. It's
used by all other classes in this module.

@var JOBQUEUE_THREADS: the number of worker threads we start for
    processing jobs

"""

import os
import logging
import errno
import re
import time
import weakref

try:
  # pylint: disable-msg=E0611
  from pyinotify import pyinotify
except ImportError:
  import pyinotify

from ganeti import asyncnotifier
from ganeti import constants
from ganeti import serializer
from ganeti import workerpool
from ganeti import locking
from ganeti import opcodes
from ganeti import errors
from ganeti import mcpu
from ganeti import utils
from ganeti import jstore
from ganeti import rpc
from ganeti import runtime
from ganeti import netutils
from ganeti import compat


JOBQUEUE_THREADS = 25
JOBS_PER_ARCHIVE_DIRECTORY = 10000

# member lock names to be passed to @ssynchronized decorator
_LOCK = "_lock"
_QUEUE = "_queue"


class CancelJob(Exception):
  """Special exception to cancel a job.

  """


def TimeStampNow():
  """Returns the current timestamp.

  @rtype: tuple
  @return: the current time in the (seconds, microseconds) format
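      (for example, C{(1234567890, 250000)} for a value of 1234567890.25
      seconds since the epoch)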

  """
  return utils.SplitTime(time.time())


class _QueuedOpCode(object):
  """Encapsulates an opcode object.

  @ivar log: holds the execution log and consists of tuples
  of the form C{(log_serial, timestamp, level, message)}
  @ivar input: the OpCode we encapsulate
  @ivar status: the current status
  @ivar result: the result of the LU execution
  @ivar start_timestamp: timestamp for the start of the execution
  @ivar exec_timestamp: timestamp for the actual LU Exec() function invocation
  @ivar end_timestamp: timestamp for the end of the execution

  """
  __slots__ = ["input", "status", "result", "log", "priority",
               "start_timestamp", "exec_timestamp", "end_timestamp",
               "__weakref__"]

  def __init__(self, op):
    """Constructor for the _QuededOpCode.

    @type op: L{opcodes.OpCode}
    @param op: the opcode we encapsulate

    """
    self.input = op
    self.status = constants.OP_STATUS_QUEUED
    self.result = None
    self.log = []
    self.start_timestamp = None
    self.exec_timestamp = None
    self.end_timestamp = None

    # Get initial priority (it might change during the lifetime of this opcode)
    self.priority = getattr(op, "priority", constants.OP_PRIO_DEFAULT)

  @classmethod
  def Restore(cls, state):
    """Restore the _QueuedOpCode from the serialized form.

    @type state: dict
    @param state: the serialized state
    @rtype: _QueuedOpCode
    @return: a new _QueuedOpCode instance

    """
    obj = _QueuedOpCode.__new__(cls)
    obj.input = opcodes.OpCode.LoadOpCode(state["input"])
    obj.status = state["status"]
    obj.result = state["result"]
    obj.log = state["log"]
    obj.start_timestamp = state.get("start_timestamp", None)
    obj.exec_timestamp = state.get("exec_timestamp", None)
    obj.end_timestamp = state.get("end_timestamp", None)
    obj.priority = state.get("priority", constants.OP_PRIO_DEFAULT)
    return obj

  def Serialize(self):
    """Serializes this _QueuedOpCode.

    @rtype: dict
    @return: the dictionary holding the serialized state

    """
    return {
      "input": self.input.__getstate__(),
      "status": self.status,
      "result": self.result,
      "log": self.log,
      "start_timestamp": self.start_timestamp,
      "exec_timestamp": self.exec_timestamp,
      "end_timestamp": self.end_timestamp,
      "priority": self.priority,
      }


class _QueuedJob(object):
  """In-memory job representation.

  This is what we use to track the user-submitted jobs. Locking must
  be taken care of by users of this class.

  @type queue: L{JobQueue}
  @ivar queue: the parent queue
  @ivar id: the job ID
  @type ops: list
  @ivar ops: the list of _QueuedOpCode that constitute the job
  @type log_serial: int
  @ivar log_serial: holds the index for the next log entry
  @ivar received_timestamp: the timestamp for when the job was received
  @ivar start_timestamp: the timestamp for start of execution
  @ivar end_timestamp: the timestamp for end of execution

  """
  # pylint: disable-msg=W0212
  __slots__ = ["queue", "id", "ops", "log_serial", "ops_iter", "cur_opctx",
               "received_timestamp", "start_timestamp", "end_timestamp",
               "__weakref__"]

  def __init__(self, queue, job_id, ops):
    """Constructor for the _QueuedJob.

    @type queue: L{JobQueue}
    @param queue: our parent queue
    @type job_id: job_id
    @param job_id: our job id
    @type ops: list
    @param ops: the list of opcodes we hold, which will be encapsulated
        in _QueuedOpCodes

    """
    if not ops:
      raise errors.GenericError("A job needs at least one opcode")

    self.queue = queue
    self.id = job_id
    self.ops = [_QueuedOpCode(op) for op in ops]
    self.log_serial = 0
    self.received_timestamp = TimeStampNow()
    self.start_timestamp = None
    self.end_timestamp = None

    self._InitInMemory(self)

  @staticmethod
  def _InitInMemory(obj):
    """Initializes in-memory variables.

    """
    obj.ops_iter = None
    obj.cur_opctx = None

  def __repr__(self):
    status = ["%s.%s" % (self.__class__.__module__, self.__class__.__name__),
              "id=%s" % self.id,
              "ops=%s" % ",".join([op.input.Summary() for op in self.ops])]

    return "<%s at %#x>" % (" ".join(status), id(self))

  @classmethod
  def Restore(cls, queue, state):
    """Restore a _QueuedJob from serialized state:

    @type queue: L{JobQueue}
    @param queue: to which queue the restored job belongs
    @type state: dict
    @param state: the serialized state
    @rtype: _QueuedJob
    @return: the restored _QueuedJob instance

    """
    obj = _QueuedJob.__new__(cls)
    obj.queue = queue
    obj.id = state["id"]
    obj.received_timestamp = state.get("received_timestamp", None)
    obj.start_timestamp = state.get("start_timestamp", None)
    obj.end_timestamp = state.get("end_timestamp", None)

    obj.ops = []
    obj.log_serial = 0
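    # Log entries are (log_serial, timestamp, level, message) tuples, so
    # the serial counter is resumed at the highest serial seen in any opcode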
    for op_state in state["ops"]:
      op = _QueuedOpCode.Restore(op_state)
      for log_entry in op.log:
        obj.log_serial = max(obj.log_serial, log_entry[0])
      obj.ops.append(op)

    cls._InitInMemory(obj)

    return obj

  def Serialize(self):
    """Serialize the _JobQueue instance.

    @rtype: dict
    @return: the serialized state

    """
    return {
      "id": self.id,
      "ops": [op.Serialize() for op in self.ops],
      "start_timestamp": self.start_timestamp,
      "end_timestamp": self.end_timestamp,
      "received_timestamp": self.received_timestamp,
      }

  def CalcStatus(self):
    """Compute the status of this job.

    This function iterates over all the _QueuedOpCodes in the job and
    based on their status, computes the job status.

    The algorithm is:
      - if we find a cancelled opcode, or one that finished with an
        error, the job status will be the same
      - otherwise, the last opcode with the status one of:
          - waitlock
          - canceling
          - running

        will determine the job status

      - otherwise, it means either all opcodes are queued or all
        succeeded, and the job status will be the same

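    For example, opcode statuses of C{(success, running, queued)} yield
    a job status of C{running}, while C{(success, error, queued)} yield
    C{error}.
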
    @return: the job status

    """
    status = constants.JOB_STATUS_QUEUED

    all_success = True
    for op in self.ops:
      if op.status == constants.OP_STATUS_SUCCESS:
        continue

      all_success = False

      if op.status == constants.OP_STATUS_QUEUED:
        pass
      elif op.status == constants.OP_STATUS_WAITLOCK:
        status = constants.JOB_STATUS_WAITLOCK
      elif op.status == constants.OP_STATUS_RUNNING:
        status = constants.JOB_STATUS_RUNNING
      elif op.status == constants.OP_STATUS_CANCELING:
        status = constants.JOB_STATUS_CANCELING
        break
      elif op.status == constants.OP_STATUS_ERROR:
        status = constants.JOB_STATUS_ERROR
        # The whole job fails if one opcode failed
        break
      elif op.status == constants.OP_STATUS_CANCELED:
        status = constants.JOB_STATUS_CANCELED
        break

    if all_success:
      status = constants.JOB_STATUS_SUCCESS

    return status

  def CalcPriority(self):
    """Gets the current priority for this job.

    Only unfinished opcodes are considered. When all are done, the default
    priority is used.

    @rtype: int

    """
    priorities = [op.priority for op in self.ops
                  if op.status not in constants.OPS_FINALIZED]

    if not priorities:
      # All opcodes are done, assume default priority
      return constants.OP_PRIO_DEFAULT

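    # A numerically smaller value means a higher priority (niceness-style),
    # hence the minimum over all pending opcodes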
    return min(priorities)

  def GetLogEntries(self, newer_than):
    """Selectively returns the log entries.

    @type newer_than: None or int
    @param newer_than: if this is None, return all log entries,
        otherwise return only the log entries with serial higher
        than this value
    @rtype: list
    @return: the list of the log entries selected

    """
    if newer_than is None:
      serial = -1
    else:
      serial = newer_than

    entries = []
    for op in self.ops:
      entries.extend(filter(lambda entry: entry[0] > serial, op.log))

    return entries

  def GetInfo(self, fields):
    """Returns information about a job.

    @type fields: list
    @param fields: names of fields to return
    @rtype: list
    @return: list with one element for each field
    @raise errors.OpExecError: when an invalid field
        has been passed

    """
    row = []
    for fname in fields:
      if fname == "id":
        row.append(self.id)
      elif fname == "status":
        row.append(self.CalcStatus())
      elif fname == "ops":
        row.append([op.input.__getstate__() for op in self.ops])
      elif fname == "opresult":
        row.append([op.result for op in self.ops])
      elif fname == "opstatus":
        row.append([op.status for op in self.ops])
      elif fname == "oplog":
        row.append([op.log for op in self.ops])
      elif fname == "opstart":
        row.append([op.start_timestamp for op in self.ops])
      elif fname == "opexec":
        row.append([op.exec_timestamp for op in self.ops])
      elif fname == "opend":
        row.append([op.end_timestamp for op in self.ops])
      elif fname == "received_ts":
        row.append(self.received_timestamp)
      elif fname == "start_ts":
        row.append(self.start_timestamp)
      elif fname == "end_ts":
        row.append(self.end_timestamp)
      elif fname == "summary":
        row.append([op.input.Summary() for op in self.ops])
      else:
        raise errors.OpExecError("Invalid self query field '%s'" % fname)
    return row

  def MarkUnfinishedOps(self, status, result):
    """Mark unfinished opcodes with a given status and result.

    This is a utility function for marking all running or waiting to
    be run opcodes with a given status. Opcodes which are already
    finalised are not changed.

    @param status: a given opcode status
    @param result: the opcode result

    """
    not_marked = True
    for op in self.ops:
      if op.status in constants.OPS_FINALIZED:
        assert not_marked, "Finalized opcodes found after non-finalized ones"
        continue
      op.status = status
      op.result = result
      not_marked = False

  def Cancel(self):
    """Marks job as canceled/-ing if possible.

    @rtype: tuple; (bool, string)
    @return: Boolean describing whether job was successfully canceled or marked
      as canceling and a text message

    """
    status = self.CalcStatus()

    if status == constants.JOB_STATUS_QUEUED:
      self.MarkUnfinishedOps(constants.OP_STATUS_CANCELED,
                             "Job canceled by request")
      return (True, "Job %s canceled" % self.id)

    elif status == constants.JOB_STATUS_WAITLOCK:
      # The worker will notice the new status and cancel the job
      self.MarkUnfinishedOps(constants.OP_STATUS_CANCELING, None)
      return (True, "Job %s will be canceled" % self.id)

    else:
      logging.debug("Job %s is no longer waiting in the queue", self.id)
      return (False, "Job %s is no longer waiting in the queue" % self.id)


class _OpExecCallbacks(mcpu.OpExecCbBase):
  def __init__(self, queue, job, op):
    """Initializes this class.

    @type queue: L{JobQueue}
    @param queue: Job queue
    @type job: L{_QueuedJob}
    @param job: Job object
    @type op: L{_QueuedOpCode}
    @param op: OpCode

    """
    assert queue, "Queue is missing"
    assert job, "Job is missing"
    assert op, "Opcode is missing"

    self._queue = queue
    self._job = job
    self._op = op

  def _CheckCancel(self):
    """Raises an exception to cancel the job if asked to.

    """
    # Cancel here if we were asked to
    if self._op.status == constants.OP_STATUS_CANCELING:
      logging.debug("Canceling opcode")
      raise CancelJob()

  @locking.ssynchronized(_QUEUE, shared=1)
  def NotifyStart(self):
    """Mark the opcode as running, not lock-waiting.

    This is called from the mcpu code as a notifier function, when the LU is
    finally about to start the Exec() method. Of course, to have end-user
    visible results, the opcode must be initially (before calling into
    Processor.ExecOpCode) set to OP_STATUS_WAITLOCK.

    """
    assert self._op in self._job.ops
    assert self._op.status in (constants.OP_STATUS_WAITLOCK,
                               constants.OP_STATUS_CANCELING)

    # Cancel here if we were asked to
    self._CheckCancel()

    logging.debug("Opcode is now running")

    self._op.status = constants.OP_STATUS_RUNNING
    self._op.exec_timestamp = TimeStampNow()

    # And finally replicate the job status
    self._queue.UpdateJobUnlocked(self._job)

  @locking.ssynchronized(_QUEUE, shared=1)
  def _AppendFeedback(self, timestamp, log_type, log_msg):
    """Internal feedback append function, with locks

    """
    self._job.log_serial += 1
    self._op.log.append((self._job.log_serial, timestamp, log_type, log_msg))
    self._queue.UpdateJobUnlocked(self._job, replicate=False)

  def Feedback(self, *args):
    """Append a log entry.

    """
    assert len(args) < 3

    if len(args) == 1:
      log_type = constants.ELOG_MESSAGE
      log_msg = args[0]
    else:
      (log_type, log_msg) = args

    # The time is split to make serialization easier and not lose
    # precision.
    timestamp = utils.SplitTime(time.time())
    self._AppendFeedback(timestamp, log_type, log_msg)

  def CheckCancel(self):
    """Check whether job has been cancelled.

    """
    assert self._op.status in (constants.OP_STATUS_WAITLOCK,
                               constants.OP_STATUS_CANCELING)

    # Cancel here if we were asked to
    self._CheckCancel()


class _JobChangesChecker(object):
  def __init__(self, fields, prev_job_info, prev_log_serial):
    """Initializes this class.

    @type fields: list of strings
    @param fields: Fields requested by LUXI client
    @type prev_job_info: string
    @param prev_job_info: previous job info, as passed by the LUXI client
    @type prev_log_serial: string
    @param prev_log_serial: previous job serial, as passed by the LUXI client

    """
    self._fields = fields
    self._prev_job_info = prev_job_info
    self._prev_log_serial = prev_log_serial

  def __call__(self, job):
    """Checks whether job has changed.

    @type job: L{_QueuedJob}
    @param job: Job object

    """
    status = job.CalcStatus()
    job_info = job.GetInfo(self._fields)
    log_entries = job.GetLogEntries(self._prev_log_serial)

    # Serializing and deserializing data can cause type changes (e.g. from
    # tuple to list) or precision loss. We're doing it here so that we get
    # the same modifications as the data received from the client. Without
    # this, the comparison afterwards might fail without the data being
    # significantly different.
    # TODO: we just deserialized from disk, investigate how to make sure that
    # the job info and log entries are compatible to avoid this further step.
    # TODO: Doing something like in testutils.py:UnifyValueType might be more
    # efficient, though floats will be tricky
    job_info = serializer.LoadJson(serializer.DumpJson(job_info))
    log_entries = serializer.LoadJson(serializer.DumpJson(log_entries))

    # Don't even try to wait if the job is no longer running, there will be
    # no changes.
    if (status not in (constants.JOB_STATUS_QUEUED,
                       constants.JOB_STATUS_RUNNING,
                       constants.JOB_STATUS_WAITLOCK) or
        job_info != self._prev_job_info or
        (log_entries and self._prev_log_serial != log_entries[0][0])):
      logging.debug("Job %s changed", job.id)
      return (job_info, log_entries)

    return None


class _JobFileChangesWaiter(object):
  def __init__(self, filename):
    """Initializes this class.

    @type filename: string
    @param filename: Path to job file
    @raises errors.InotifyError: if the notifier cannot be setup

    """
    self._wm = pyinotify.WatchManager()
    self._inotify_handler = \
      asyncnotifier.SingleFileEventHandler(self._wm, self._OnInotify, filename)
    self._notifier = \
      pyinotify.Notifier(self._wm, default_proc_fun=self._inotify_handler)
    try:
      self._inotify_handler.enable()
    except Exception:
      # pyinotify doesn't close file descriptors automatically
      self._notifier.stop()
      raise

  def _OnInotify(self, notifier_enabled):
    """Callback for inotify.

    """
    if not notifier_enabled:
      self._inotify_handler.enable()

  def Wait(self, timeout):
    """Waits for the job file to change.

    @type timeout: float
    @param timeout: Timeout in seconds
    @return: Whether there have been events

    """
    assert timeout >= 0
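    # pyinotify expects the timeout in milliseconds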
    have_events = self._notifier.check_events(timeout * 1000)
    if have_events:
      self._notifier.read_events()
    self._notifier.process_events()
    return have_events

  def Close(self):
    """Closes underlying notifier and its file descriptor.

    """
    self._notifier.stop()


class _JobChangesWaiter(object):
  def __init__(self, filename):
    """Initializes this class.

    @type filename: string
    @param filename: Path to job file

    """
    self._filewaiter = None
    self._filename = filename

  def Wait(self, timeout):
    """Waits for a job to change.

    @type timeout: float
    @param timeout: Timeout in seconds
    @return: Whether there have been events

    """
    if self._filewaiter:
      return self._filewaiter.Wait(timeout)

    # Lazy setup: Avoid inotify setup cost when job file has already changed.
    # If this point is reached, return immediately and let caller check the job
    # file again in case there were changes since the last check. This avoids a
    # race condition.
    self._filewaiter = _JobFileChangesWaiter(self._filename)

    return True

  def Close(self):
    """Closes underlying waiter.

    """
    if self._filewaiter:
      self._filewaiter.Close()


class _WaitForJobChangesHelper(object):
  """Helper class using inotify to wait for changes in a job file.

  This class takes a previous job status and serial, and alerts the client when
  the current job status has changed.

  """
  @staticmethod
  def _CheckForChanges(job_load_fn, check_fn):
    job = job_load_fn()
    if not job:
      raise errors.JobLost()

    result = check_fn(job)
    if result is None:
      raise utils.RetryAgain()

    return result

  def __call__(self, filename, job_load_fn,
               fields, prev_job_info, prev_log_serial, timeout):
    """Waits for changes on a job.

    @type filename: string
    @param filename: File on which to wait for changes
    @type job_load_fn: callable
    @param job_load_fn: Function to load job
    @type fields: list of strings
    @param fields: Which fields to check for changes
    @type prev_job_info: list or None
    @param prev_job_info: Last job information returned
    @type prev_log_serial: int
    @param prev_log_serial: Last job message serial number
    @type timeout: float
    @param timeout: maximum time to wait in seconds

    """
    try:
      check_fn = _JobChangesChecker(fields, prev_job_info, prev_log_serial)
      waiter = _JobChangesWaiter(filename)
      try:
        return utils.Retry(compat.partial(self._CheckForChanges,
                                          job_load_fn, check_fn),
                           utils.RETRY_REMAINING_TIME, timeout,
                           wait_fn=waiter.Wait)
      finally:
        waiter.Close()
    except (errors.InotifyError, errors.JobLost):
      return None
    except utils.RetryTimeout:
      return constants.JOB_NOTCHANGED


def _EncodeOpError(err):
  """Encodes an error which occurred while processing an opcode.

  """
  if isinstance(err, errors.GenericError):
    to_encode = err
  else:
    to_encode = errors.OpExecError(str(err))

  return errors.EncodeException(to_encode)


class _TimeoutStrategyWrapper:
  def __init__(self, fn):
    """Initializes this class.

    """
    self._fn = fn
    self._next = None

  def _Advance(self):
    """Gets the next timeout if necessary.

    """
    if self._next is None:
      self._next = self._fn()

  def Peek(self):
    """Returns the next timeout.

    """
    self._Advance()
    return self._next

  def Next(self):
    """Returns the current timeout and advances the internal state.

    """
    self._Advance()
    result = self._next
    self._next = None
    return result


class _OpExecContext:
  def __init__(self, op, index, log_prefix, timeout_strategy_factory):
    """Initializes this class.

    """
    self.op = op
    self.index = index
    self.log_prefix = log_prefix
    self.summary = op.input.Summary()

    self._timeout_strategy_factory = timeout_strategy_factory
    self._ResetTimeoutStrategy()

  def _ResetTimeoutStrategy(self):
    """Creates a new timeout strategy.

    """
    self._timeout_strategy = \
      _TimeoutStrategyWrapper(self._timeout_strategy_factory().NextAttempt)

  def CheckPriorityIncrease(self):
    """Checks whether priority can and should be increased.

    Called when locks couldn't be acquired.

    """
    op = self.op

    # Exhausted all retries and next round should not use blocking acquire
    # for locks?
    if (self._timeout_strategy.Peek() is None and
        op.priority > constants.OP_PRIO_HIGHEST):
      logging.debug("Increasing priority")
      op.priority -= 1
      self._ResetTimeoutStrategy()
      return True

    return False

  def GetNextLockTimeout(self):
    """Returns the next lock acquire timeout.

    """
    return self._timeout_strategy.Next()


class _JobProcessor(object):
  def __init__(self, queue, opexec_fn, job,
               _timeout_strategy_factory=mcpu.LockAttemptTimeoutStrategy):
    """Initializes this class.

    """
    self.queue = queue
    self.opexec_fn = opexec_fn
    self.job = job
    self._timeout_strategy_factory = _timeout_strategy_factory

  @staticmethod
  def _FindNextOpcode(job, timeout_strategy_factory):
    """Locates the next opcode to run.

    @type job: L{_QueuedJob}
    @param job: Job object
    @param timeout_strategy_factory: Callable to create new timeout strategy

    """
    # Create some sort of a cache to speed up locating next opcode for future
    # lookups
    # TODO: Consider splitting _QueuedJob.ops into two separate lists, one for
    # pending and one for processed ops.
    if job.ops_iter is None:
      job.ops_iter = enumerate(job.ops)

    # Find next opcode to run
    while True:
      try:
        (idx, op) = job.ops_iter.next()
      except StopIteration:
        raise errors.ProgrammerError("Called for a finished job")

      if op.status == constants.OP_STATUS_RUNNING:
        # Found an opcode already marked as running
        raise errors.ProgrammerError("Called for job marked as running")

      opctx = _OpExecContext(op, idx, "Op %s/%s" % (idx + 1, len(job.ops)),
                             timeout_strategy_factory)

      if op.status == constants.OP_STATUS_CANCELED:
        # Cancelled jobs are handled by the caller
        assert not compat.any(i.status != constants.OP_STATUS_CANCELED
                              for i in job.ops[idx:])

      elif op.status in constants.OPS_FINALIZED:
        # This is a job that was partially completed before master daemon
        # shutdown, so it can be expected that some opcodes are already
        # completed successfully (if any did error out, then the whole job
        # should have been aborted and not resubmitted for processing).
        logging.info("%s: opcode %s already processed, skipping",
                     opctx.log_prefix, opctx.summary)
        continue

      return opctx

  @staticmethod
  def _MarkWaitlock(job, op):
    """Marks an opcode as waiting for locks.

    The job's start timestamp is also set if necessary.

    @type job: L{_QueuedJob}
    @param job: Job object
    @type op: L{_QueuedOpCode}
    @param op: Opcode object

    """
    assert op in job.ops

    op.status = constants.OP_STATUS_WAITLOCK
    op.result = None
    op.start_timestamp = TimeStampNow()

    if job.start_timestamp is None:
      job.start_timestamp = op.start_timestamp

  def _ExecOpCodeUnlocked(self, opctx):
    """Processes one opcode and returns the result.

    """
    op = opctx.op

    assert op.status == constants.OP_STATUS_WAITLOCK

    timeout = opctx.GetNextLockTimeout()
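    # A timeout of None means the locks are acquired in blocking mode, i.e.
    # without a timeout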

    try:
      # Make sure not to hold queue lock while calling ExecOpCode
      result = self.opexec_fn(op.input,
                              _OpExecCallbacks(self.queue, self.job, op),
                              timeout=timeout, priority=op.priority)
    except mcpu.LockAcquireTimeout:
      assert timeout is not None, "Received timeout for blocking acquire"
      logging.debug("Couldn't acquire locks in %0.6fs", timeout)
      assert op.status == constants.OP_STATUS_WAITLOCK
      return (constants.OP_STATUS_QUEUED, None)
    except CancelJob:
      logging.exception("%s: Canceling job", opctx.log_prefix)
      assert op.status == constants.OP_STATUS_CANCELING
      return (constants.OP_STATUS_CANCELING, None)
    except Exception, err: # pylint: disable-msg=W0703
      logging.exception("%s: Caught exception in %s",
                        opctx.log_prefix, opctx.summary)
      return (constants.OP_STATUS_ERROR, _EncodeOpError(err))
    else:
      logging.debug("%s: %s successful",
                    opctx.log_prefix, opctx.summary)
      return (constants.OP_STATUS_SUCCESS, result)

  def __call__(self, _nextop_fn=None):
    """Continues execution of a job.

    @param _nextop_fn: Callback function for tests
    @rtype: bool
    @return: True if job is finished, False if processor needs to be called
             again

    """
    queue = self.queue
    job = self.job

    logging.debug("Processing job %s", job.id)

    queue.acquire(shared=1)
    try:
      opcount = len(job.ops)

      # Is a previous opcode still pending?
      if job.cur_opctx:
        opctx = job.cur_opctx
      else:
        if __debug__ and _nextop_fn:
          _nextop_fn()
        opctx = self._FindNextOpcode(job, self._timeout_strategy_factory)

      op = opctx.op

      # Consistency check
      assert compat.all(i.status in (constants.OP_STATUS_QUEUED,
                                     constants.OP_STATUS_CANCELED)
                        for i in job.ops[opctx.index:])

      assert op.status in (constants.OP_STATUS_QUEUED,
                           constants.OP_STATUS_WAITLOCK,
                           constants.OP_STATUS_CANCELED)

      assert (op.priority <= constants.OP_PRIO_LOWEST and
              op.priority >= constants.OP_PRIO_HIGHEST)

      if op.status != constants.OP_STATUS_CANCELED:
        # Prepare to start opcode
        self._MarkWaitlock(job, op)

        assert op.status == constants.OP_STATUS_WAITLOCK
        assert job.CalcStatus() == constants.JOB_STATUS_WAITLOCK

        # Write to disk
        queue.UpdateJobUnlocked(job)

        logging.info("%s: opcode %s waiting for locks",
                     opctx.log_prefix, opctx.summary)

        queue.release()
        try:
          (op_status, op_result) = self._ExecOpCodeUnlocked(opctx)
        finally:
          queue.acquire(shared=1)

        op.status = op_status
        op.result = op_result

        if op.status == constants.OP_STATUS_QUEUED:
          # Couldn't get locks in time
          assert not op.end_timestamp
        else:
          # Finalize opcode
          op.end_timestamp = TimeStampNow()

          if op.status == constants.OP_STATUS_CANCELING:
            assert not compat.any(i.status != constants.OP_STATUS_CANCELING
                                  for i in job.ops[opctx.index:])
          else:
            assert op.status in constants.OPS_FINALIZED

      if op.status == constants.OP_STATUS_QUEUED:
        finalize = False

        opctx.CheckPriorityIncrease()

        # Keep around for another round
        job.cur_opctx = opctx

        assert (op.priority <= constants.OP_PRIO_LOWEST and
                op.priority >= constants.OP_PRIO_HIGHEST)

        # In no case must the status be finalized here
        assert job.CalcStatus() == constants.JOB_STATUS_QUEUED

        queue.UpdateJobUnlocked(job)

      else:
        # Ensure all opcodes so far have been successful
        assert (opctx.index == 0 or
                compat.all(i.status == constants.OP_STATUS_SUCCESS
                           for i in job.ops[:opctx.index]))

        # Reset context
        job.cur_opctx = None

        if op.status == constants.OP_STATUS_SUCCESS:
          finalize = False

        elif op.status == constants.OP_STATUS_ERROR:
          # Ensure failed opcode has an exception as its result
          assert errors.GetEncodedError(job.ops[opctx.index].result)

          to_encode = errors.OpExecError("Preceding opcode failed")
          job.MarkUnfinishedOps(constants.OP_STATUS_ERROR,
                                _EncodeOpError(to_encode))
          finalize = True

          # Consistency check
          assert compat.all(i.status == constants.OP_STATUS_ERROR and
                            errors.GetEncodedError(i.result)
                            for i in job.ops[opctx.index:])

        elif op.status == constants.OP_STATUS_CANCELING:
          job.MarkUnfinishedOps(constants.OP_STATUS_CANCELED,
                                "Job canceled by request")
          finalize = True

        elif op.status == constants.OP_STATUS_CANCELED:
          finalize = True

        else:
          raise errors.ProgrammerError("Unknown status '%s'" % op.status)

        # Finalizing or last opcode?
        if finalize or opctx.index == (opcount - 1):
          # All opcodes have been run, finalize job
          job.end_timestamp = TimeStampNow()

        # Write to disk. If the job status is final, this is the final write
        # allowed. Once the file has been written, it can be archived anytime.
        queue.UpdateJobUnlocked(job)

        if finalize or opctx.index == (opcount - 1):
          logging.info("Finished job %s, status = %s", job.id, job.CalcStatus())
          return True

      return False
    finally:
      queue.release()


class _JobQueueWorker(workerpool.BaseWorker):
  """The actual job workers.

  """
  def RunTask(self, job): # pylint: disable-msg=W0221
    """Job executor.

    This functions processes a job. It is closely tied to the L{_QueuedJob} and
    L{_QueuedOpCode} classes.

    @type job: L{_QueuedJob}
    @param job: the job to be processed

    """
    queue = job.queue
    assert queue == self.pool.queue

    self.SetTaskName("Job%s" % job.id)

    proc = mcpu.Processor(queue.context, job.id)

    if not _JobProcessor(queue, proc.ExecOpCode, job)():
      # Schedule again
      raise workerpool.DeferTask(priority=job.CalcPriority())


class _JobQueueWorkerPool(workerpool.WorkerPool):
  """Simple class implementing a job-processing workerpool.

  """
  def __init__(self, queue):
    super(_JobQueueWorkerPool, self).__init__("JobQueue",
                                              JOBQUEUE_THREADS,
                                              _JobQueueWorker)
    self.queue = queue


def _RequireOpenQueue(fn):
  """Decorator for "public" functions.

  This function should be used for all 'public' functions. That is,
  functions usually called from other classes. Note that this should
  be applied only to methods (not plain functions), since it expects
  that the decorated function is called with a first argument that has
  a '_queue_filelock' attribute.

  @warning: Use this decorator only after locking.ssynchronized

  Example::
    @locking.ssynchronized(_LOCK)
    @_RequireOpenQueue
    def Example(self):
      pass

  """
  def wrapper(self, *args, **kwargs):
    # pylint: disable-msg=W0212
    assert self._queue_filelock is not None, "Queue should be open"
    return fn(self, *args, **kwargs)
  return wrapper


class JobQueue(object):
  """Queue used to manage the jobs.

  @cvar _RE_JOB_FILE: regex matching the valid job file names

  """
  _RE_JOB_FILE = re.compile(r"^job-(%s)$" % constants.JOB_ID_TEMPLATE)

  def __init__(self, context):
    """Constructor for JobQueue.

    The constructor will initialize the job queue object and then
    start loading the current jobs from disk, either for starting them
    (if they were queued) or for aborting them (if they were already
    running).

    @type context: GanetiContext
    @param context: the context object for access to the configuration
        data and other ganeti objects

    """
    self.context = context
    self._memcache = weakref.WeakValueDictionary()
    self._my_hostname = netutils.Hostname.GetSysName()

    # The Big JobQueue lock. If a code block or method acquires it in shared
    # mode, it must guarantee concurrency with all the code acquiring it in
    # shared mode, including itself. In order not to acquire it at all,
    # concurrency must be guaranteed with all code acquiring it in shared
    # mode and all code acquiring it exclusively.
    self._lock = locking.SharedLock("JobQueue")

    self.acquire = self._lock.acquire
    self.release = self._lock.release

    # Initialize the queue, and acquire the filelock.
    # This ensures no other process is working on the job queue.
    self._queue_filelock = jstore.InitAndVerifyQueue(must_lock=True)

    # Read serial file
    self._last_serial = jstore.ReadSerial()
    assert self._last_serial is not None, ("Serial file was modified between"
                                           " check in jstore and here")

    # Get initial list of nodes
    self._nodes = dict((n.name, n.primary_ip)
                       for n in self.context.cfg.GetAllNodesInfo().values()
                       if n.master_candidate)

    # Remove master node
    self._nodes.pop(self._my_hostname, None)

    # TODO: Check consistency across nodes

    self._queue_size = 0
    self._UpdateQueueSizeUnlocked()
    self._drained = self._IsQueueMarkedDrain()

    # Setup worker pool
    self._wpool = _JobQueueWorkerPool(self)
    try:
      self._InspectQueue()
    except:
      self._wpool.TerminateWorkers()
      raise

  @locking.ssynchronized(_LOCK)
  @_RequireOpenQueue
  def _InspectQueue(self):
    """Loads the whole job queue and resumes unfinished jobs.

    This function needs the lock here because WorkerPool.AddTask() may start a
    job while we're still doing our work.

    """
    logging.info("Inspecting job queue")

    restartjobs = []

    all_job_ids = self._GetJobIDsUnlocked()
    jobs_count = len(all_job_ids)
    lastinfo = time.time()
    for idx, job_id in enumerate(all_job_ids):
      # Give an update every 1000 jobs or 10 seconds
      if (idx % 1000 == 0 or time.time() >= (lastinfo + 10.0) or
          idx == (jobs_count - 1)):
        logging.info("Job queue inspection: %d/%d (%0.1f %%)",
                     idx, jobs_count - 1, 100.0 * (idx + 1) / jobs_count)
        lastinfo = time.time()

      job = self._LoadJobUnlocked(job_id)

      # a failure in loading the job can cause 'None' to be returned
      if job is None:
        continue

      status = job.CalcStatus()

      if status in (constants.JOB_STATUS_QUEUED, ):
        restartjobs.append(job)

      elif status in (constants.JOB_STATUS_RUNNING,
                      constants.JOB_STATUS_WAITLOCK,
                      constants.JOB_STATUS_CANCELING):
        logging.warning("Unfinished job %s found: %s", job.id, job)
        job.MarkUnfinishedOps(constants.OP_STATUS_ERROR,
                              "Unclean master daemon shutdown")
        self.UpdateJobUnlocked(job)

    if restartjobs:
      logging.info("Restarting %s jobs", len(restartjobs))
      self._EnqueueJobs(restartjobs)

    logging.info("Job queue inspection finished")

  @locking.ssynchronized(_LOCK)
  @_RequireOpenQueue
  def AddNode(self, node):
    """Register a new node with the queue.

    @type node: L{objects.Node}
    @param node: the node object to be added

    """
    node_name = node.name
    assert node_name != self._my_hostname

    # Clean queue directory on added node