jqueue.py
#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the job queue handling."""

import os
import logging
import threading
import errno
import re
import time

from ganeti import constants
from ganeti import serializer
from ganeti import workerpool
from ganeti import opcodes
from ganeti import errors
from ganeti import mcpu
from ganeti import utils
from ganeti import jstore
from ganeti import rpc


JOBQUEUE_THREADS = 5
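
# Illustrative usage sketch only (not part of the module): "context" is
# assumed to be the master daemon's context object (whose cfg attribute
# provides GetNodeList()) and "my_opcode" an opcodes.OpCode instance.
#
#   queue = JobQueue(context)
#   job_id = queue.SubmitJob([my_opcode])
#   queue.QueryJobs([job_id], ["id", "status"])
#   queue.Shutdown()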


class _QueuedOpCode(object):
  """Encasulates an opcode object.

48
  Access is synchronized by the '_lock' attribute.

  The 'log' attribute holds the execution log and consists of tuples
  of the form (timestamp, level, message).

  """
  def __new__(cls, *args, **kwargs):
    obj = object.__new__(cls, *args, **kwargs)
    # Create a special lock for logging
    obj._log_lock = threading.Lock()
    return obj

  def __init__(self, op):
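    """Constructor for the _QueuedOpCode.

    Args:
      op: the opcodes.OpCode instance that we encapsulate

    """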
    self.input = op
    self.status = constants.OP_STATUS_QUEUED
    self.result = None
    self.log = []

  @classmethod
  def Restore(cls, state):
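    """Restores a _QueuedOpCode from its serialized (dict) form.

    """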
    obj = _QueuedOpCode.__new__(cls)
    obj.input = opcodes.OpCode.LoadOpCode(state["input"])
    obj.status = state["status"]
    obj.result = state["result"]
    obj.log = state["log"]
    return obj

  def Serialize(self):
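    """Serializes this opcode into a dict that can be JSON-encoded.

    """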
    self._log_lock.acquire()
    try:
      return {
        "input": self.input.__getstate__(),
        "status": self.status,
        "result": self.result,
        "log": self.log,
        }
    finally:
      self._log_lock.release()

  def Log(self, *args):
    """Append a log entry.

    """
    assert len(args) < 3

    if len(args) == 1:
      log_type = constants.ELOG_MESSAGE
      log_msg = args[0]
    else:
      log_type, log_msg = args

    self._log_lock.acquire()
    try:
      # The time is split to make serialization easier and to avoid losing
      # precision.
      self.log.append((utils.SplitTime(time.time()), log_type, log_msg))
    finally:
      self._log_lock.release()


  def RetrieveLog(self, start_at=0):
    """Retrieve (a part of) the execution log.

    """
    self._log_lock.acquire()
    try:
      return self.log[start_at:]
    finally:
      self._log_lock.release()


class _QueuedJob(object):
  """In-memory job representation.

  This is what we use to track the user-submitted jobs.

  """
  def __new__(cls, *args, **kwargs):
    obj = object.__new__(cls, *args, **kwargs)
    # Condition to wait for changes
    obj.change = threading.Condition()
    return obj

  def __init__(self, queue, job_id, ops):
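    """Constructor for the _QueuedJob.

    Args:
      queue: the parent JobQueue
      job_id: our job identifier
      ops: the list of opcodes we hold, which will be wrapped in
        _QueuedOpCode objects

    """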
    if not ops:
      # TODO
      raise Exception("No opcodes")

    self.queue = queue
    self.id = job_id
    self.ops = [_QueuedOpCode(op) for op in ops]
    self.run_op_index = -1

  @classmethod
  def Restore(cls, queue, state):
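    """Restores a _QueuedJob from its serialized (dict) form.

    """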
    obj = _QueuedJob.__new__(cls)
    obj.queue = queue
    obj.id = state["id"]
    obj.ops = [_QueuedOpCode.Restore(op_state) for op_state in state["ops"]]
    obj.run_op_index = state["run_op_index"]
    return obj

  def Serialize(self):
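    """Serializes this job into a dict that can be JSON-encoded.

    """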
    return {
      "id": self.id,
      "ops": [op.Serialize() for op in self.ops],
      "run_op_index": self.run_op_index,
      }

  def CalcStatus(self):
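    """Computes the overall job status from the individual opcode statuses.

    The job is reported as running as soon as one opcode is running, as
    failed or canceled as soon as one opcode fails or is canceled, and as
    successful only if all opcodes succeeded.

    """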
    status = constants.JOB_STATUS_QUEUED

    all_success = True
    for op in self.ops:
      if op.status == constants.OP_STATUS_SUCCESS:
        continue

      all_success = False

      if op.status == constants.OP_STATUS_QUEUED:
        pass
      elif op.status == constants.OP_STATUS_RUNNING:
        status = constants.JOB_STATUS_RUNNING
      elif op.status == constants.OP_STATUS_ERROR:
        status = constants.JOB_STATUS_ERROR
        # The whole job fails if one opcode failed
        break
      elif op.status == constants.OP_STATUS_CANCELED:
        status = constants.JOB_STATUS_CANCELED
        break

    if all_success:
      status = constants.JOB_STATUS_SUCCESS

    return status


class _JobQueueWorker(workerpool.BaseWorker):
  def RunTask(self, job):
    """Job executor.

    This function processes a job.

    """
    logging.debug("Worker %s processing job %s",
                  self.worker_id, job.id)
    proc = mcpu.Processor(self.pool.queue.context)
    queue = job.queue
    try:
      try:
        count = len(job.ops)
        for idx, op in enumerate(job.ops):
          try:
            logging.debug("Op %s/%s: Starting %s", idx + 1, count, op)

            queue.acquire()
            try:
              job.run_op_index = idx
              op.status = constants.OP_STATUS_RUNNING
              op.result = None
              queue.UpdateJobUnlocked(job)

              input_opcode = op.input
            finally:
              queue.release()

            def _Log(*args):
              op.Log(*args)

              job.change.acquire()
              try:
                job.change.notifyAll()
              finally:
                job.change.release()

            result = proc.ExecOpCode(input_opcode, _Log)

            queue.acquire()
            try:
              op.status = constants.OP_STATUS_SUCCESS
              op.result = result
              queue.UpdateJobUnlocked(job)
            finally:
              queue.release()

            logging.debug("Op %s/%s: Successfully finished %s",
                          idx + 1, count, op)
          except Exception, err:
            queue.acquire()
            try:
              try:
                op.status = constants.OP_STATUS_ERROR
                op.result = str(err)
                logging.debug("Op %s/%s: Error in %s", idx + 1, count, op)
              finally:
                queue.UpdateJobUnlocked(job)
            finally:
              queue.release()
            raise

      except errors.GenericError, err:
        logging.exception("Ganeti exception")
      except:
        logging.exception("Unhandled exception")
    finally:
      queue.acquire()
      try:
        job_id = job.id
        status = job.CalcStatus()
      finally:
        queue.release()
      logging.debug("Worker %s finished job %s, status = %s",
                    self.worker_id, job_id, status)


class _JobQueueWorkerPool(workerpool.WorkerPool):
  def __init__(self, queue):
    super(_JobQueueWorkerPool, self).__init__(JOBQUEUE_THREADS,
                                              _JobQueueWorker)
    self.queue = queue


class JobQueue(object):
  _RE_JOB_FILE = re.compile(r"^job-(%s)$" % constants.JOB_ID_TEMPLATE)

  def _RequireOpenQueue(fn):
    """Decorator for "public" functions.

    This decorator should be used for all "public" functions, i.e. functions
    usually called from other classes.

    Important: Use this decorator only after utils.LockedMethod!

    Example:
      @utils.LockedMethod
      @_RequireOpenQueue
      def Example(self):
        pass

    """
    def wrapper(self, *args, **kwargs):
      assert self._queue_lock is not None, "Queue should be open"
      return fn(self, *args, **kwargs)
    return wrapper

  def __init__(self, context):
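    """Constructor for JobQueue.

    Sets up locking, opens and verifies the on-disk queue, determines the
    set of nodes to replicate to, starts the worker pool and re-queues any
    jobs still in the QUEUED state (jobs found RUNNING are marked as failed
    due to an unclean master daemon shutdown).

    """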
    self.context = context
    self._memcache = {}
    self._my_hostname = utils.HostInfo().name

    # Locking
    self._lock = threading.Lock()
    self.acquire = self._lock.acquire
    self.release = self._lock.release

    # Initialize
    self._queue_lock = jstore.InitAndVerifyQueue(must_lock=True)

    # Read serial file
    self._last_serial = jstore.ReadSerial()
    assert self._last_serial is not None, ("Serial file was modified between"
                                           " check in jstore and here")

    # Get initial list of nodes
    self._nodes = set(self.context.cfg.GetNodeList())

    # Remove master node
    try:
      self._nodes.remove(self._my_hostname)
    except KeyError:
      pass

    # TODO: Check consistency across nodes

    # Setup worker pool
    self._wpool = _JobQueueWorkerPool(self)

    # We need to lock here because WorkerPool.AddTask() may start a job while
    # we're still doing our work.
    self.acquire()
    try:
      for job in self._GetJobsUnlocked(None):
        status = job.CalcStatus()

        if status in (constants.JOB_STATUS_QUEUED, ):
          self._wpool.AddTask(job)

        elif status in (constants.JOB_STATUS_RUNNING, ):
          logging.warning("Unfinished job %s found: %s", job.id, job)
          try:
            for op in job.ops:
              op.status = constants.OP_STATUS_ERROR
              op.result = "Unclean master daemon shutdown"
          finally:
            self.UpdateJobUnlocked(job)
    finally:
      self.release()

  @utils.LockedMethod
  @_RequireOpenQueue
  def AddNode(self, node_name):
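    """Registers a new node with the job queue.

    The node's queue directory is purged and the current job files plus
    the serial file are copied over before the node is added to the set
    of replication targets.

    """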
    assert node_name != self._my_hostname

    # Clean queue directory on added node
    rpc.call_jobqueue_purge(node_name)

    # Upload the whole queue excluding archived jobs
    files = [self._GetJobPath(job_id) for job_id in self._GetJobIDsUnlocked()]

    # Upload current serial file
    files.append(constants.JOB_QUEUE_SERIAL_FILE)

    for file_name in files:
      # Read file content
      fd = open(file_name, "r")
      try:
        content = fd.read()
      finally:
        fd.close()

      result = rpc.call_jobqueue_update([node_name], file_name, content)
      if not result[node_name]:
        logging.error("Failed to upload %s to %s", file_name, node_name)

    self._nodes.add(node_name)

  @utils.LockedMethod
  @_RequireOpenQueue
  def RemoveNode(self, node_name):
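    """Removes a node from the set of replication targets.

    """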
    try:
      # The queue is removed by the "leave node" RPC call.
      self._nodes.remove(node_name)
    except KeyError:
      pass

  def _WriteAndReplicateFileUnlocked(self, file_name, data):
    """Writes a file locally and then replicates it to all nodes.

    """
    utils.WriteFile(file_name, data=data)

    failed_nodes = 0
    result = rpc.call_jobqueue_update(self._nodes, file_name, data)
    for node in self._nodes:
      if not result[node]:
        failed_nodes += 1
        logging.error("Copy of job queue file to node %s failed", node)

    # TODO: check failed_nodes

  def _RenameFileUnlocked(self, old, new):
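    """Renames a file locally and then replicates the rename to all nodes.

    """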
    os.rename(old, new)

    result = rpc.call_jobqueue_rename(self._nodes, old, new)
    for node in self._nodes:
      if not result[node]:
        logging.error("Moving %s to %s failed on %s", old, new, node)

    # TODO: check failed nodes

  def _FormatJobID(self, job_id):
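    """Converts a numeric job ID to its string representation.

    """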
    if not isinstance(job_id, (int, long)):
      raise errors.ProgrammerError("Job ID '%s' not numeric" % job_id)
    if job_id < 0:
      raise errors.ProgrammerError("Job ID %s is negative" % job_id)

    return str(job_id)

  def _NewSerialUnlocked(self):
    """Generates a new job identifier.

    Job identifiers are unique during the lifetime of a cluster.

    Returns: A string representing the job identifier.

    """
    # New number
    serial = self._last_serial + 1

    # Write to file
    self._WriteAndReplicateFileUnlocked(constants.JOB_QUEUE_SERIAL_FILE,
                                        "%s\n" % serial)

    # Keep it only if we were able to write the file
    self._last_serial = serial

    return self._FormatJobID(serial)

  @staticmethod
  def _GetJobPath(job_id):
    return os.path.join(constants.QUEUE_DIR, "job-%s" % job_id)

  @staticmethod
  def _GetArchivedJobPath(job_id):
    return os.path.join(constants.JOB_QUEUE_ARCHIVE_DIR, "job-%s" % job_id)

  @classmethod
  def _ExtractJobID(cls, name):
    m = cls._RE_JOB_FILE.match(name)
    if m:
      return m.group(1)
    else:
      return None

  def _GetJobIDsUnlocked(self, archived=False):
    """Return all known job IDs.

    If the parameter archived is True, archived job IDs will be
    included. Currently this argument is unused.

    The method only looks at disk because it's a requirement that all
    jobs are present on disk (so in the _memcache we don't have any
    extra IDs).

    """
    jlist = [self._ExtractJobID(name) for name in self._ListJobFiles()]
    jlist.sort()
    return jlist

  def _ListJobFiles(self):
    return [name for name in utils.ListVisibleFiles(constants.QUEUE_DIR)
            if self._RE_JOB_FILE.match(name)]

  def _LoadJobUnlocked(self, job_id):
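    """Loads a job from disk, using the memory cache when possible.

    Returns None if the job file does not exist.

    """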
    if job_id in self._memcache:
      logging.debug("Found job %s in memcache", job_id)
      return self._memcache[job_id]

    filepath = self._GetJobPath(job_id)
    logging.debug("Loading job from %s", filepath)
    try:
      fd = open(filepath, "r")
    except IOError, err:
      if err.errno in (errno.ENOENT, ):
        return None
      raise
    try:
      data = serializer.LoadJson(fd.read())
    finally:
      fd.close()

    job = _QueuedJob.Restore(self, data)
    self._memcache[job_id] = job
    logging.debug("Added job %s to the cache", job_id)
    return job

  def _GetJobsUnlocked(self, job_ids):
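    """Returns the jobs for the given IDs, or all jobs if job_ids is empty.

    """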
    if not job_ids:
      job_ids = self._GetJobIDsUnlocked()

    return [self._LoadJobUnlocked(job_id) for job_id in job_ids]

  @utils.LockedMethod
  @_RequireOpenQueue
  def SubmitJob(self, ops):
    """Create and store a new job.

    This enters the job into our job queue and also adds it to the worker
    pool, in order for it to be picked up by the queue processors.

    @type ops: list
    @param ops: The list of OpCodes that will become the new job.

    """
    # Get job identifier
    job_id = self._NewSerialUnlocked()
    job = _QueuedJob(self, job_id, ops)

    # Write to disk
    self.UpdateJobUnlocked(job)

    logging.debug("Added new job %s to the cache", job_id)
    self._memcache[job_id] = job

    # Add to worker pool
    self._wpool.AddTask(job)

    return job.id

  @_RequireOpenQueue
  def UpdateJobUnlocked(self, job):
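    """Updates a job's on-disk copy and replicates it to all nodes.

    This also cleans the memory cache and notifies any threads waiting
    for changes on this job.

    """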
    filename = self._GetJobPath(job.id)
    data = serializer.DumpJson(job.Serialize(), indent=False)
    logging.debug("Writing job %s to %s", job.id, filename)
    self._WriteAndReplicateFileUnlocked(filename, data)
    self._CleanCacheUnlocked([job.id])

    # Notify waiters about potential changes
    job.change.acquire()
    try:
      job.change.notifyAll()
    finally:
      job.change.release()

  def _CleanCacheUnlocked(self, exclude):
    """Clean the memory cache.

    The exclude argument contains job IDs that should not be
    cleaned.

    """
    assert isinstance(exclude, list)

    for job in self._memcache.values():
      if job.id in exclude:
        continue
      if job.CalcStatus() not in (constants.JOB_STATUS_QUEUED,
                                  constants.JOB_STATUS_RUNNING):
        logging.debug("Cleaning job %s from the cache", job.id)
        try:
          del self._memcache[job.id]
        except KeyError:
          pass

  @_RequireOpenQueue
  def WaitForJobChanges(self, job_id, fields, previous):
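    """Waits until the given fields of a job differ from 'previous'.

    Returns the new values of the requested fields, or None if the job
    is not found.

    """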
    logging.debug("Waiting for changes in job %s", job_id)

    while True:
      self.acquire()
      try:
        job = self._LoadJobUnlocked(job_id)
        if not job:
          logging.debug("Job %s not found", job_id)
          new_state = None
          break

        new_state = self._GetJobInfoUnlocked(job, fields)
      finally:
        self.release()

      # Serializing and deserializing data can cause type changes (e.g. from
      # tuple to list) or precision loss. We're doing it here so that we get
      # the same modifications as the data received from the client. Without
      # this, the comparison afterwards might fail without the data being
      # significantly different.
      new_state = serializer.LoadJson(serializer.DumpJson(new_state))

      if previous != new_state:
        break

      job.change.acquire()
      try:
        job.change.wait()
      finally:
        job.change.release()

    logging.debug("Job %s changed", job_id)

    return new_state

  @utils.LockedMethod
  @_RequireOpenQueue
  def CancelJob(self, job_id):
    """Cancels a job.

    @type job_id: string
    @param job_id: Job ID of job to be cancelled.

    """
    logging.debug("Cancelling job %s", job_id)

    job = self._LoadJobUnlocked(job_id)
    if not job:
      logging.debug("Job %s not found", job_id)
      return

    if job.CalcStatus() not in (constants.JOB_STATUS_QUEUED,):
      logging.debug("Job %s is no longer in the queue", job.id)
      return

    try:
      for op in job.ops:
        op.status = constants.OP_STATUS_ERROR
        op.result = "Job cancelled by request"
    finally:
      self.UpdateJobUnlocked(job)

  @utils.LockedMethod
  @_RequireOpenQueue
  def ArchiveJob(self, job_id):
    """Archives a job.

    @type job_id: string
    @param job_id: Job ID of job to be archived.

    """
    logging.debug("Archiving job %s", job_id)

    job = self._LoadJobUnlocked(job_id)
    if not job:
      logging.debug("Job %s not found", job_id)
      return

    if job.CalcStatus() not in (constants.JOB_STATUS_CANCELED,
                                constants.JOB_STATUS_SUCCESS,
                                constants.JOB_STATUS_ERROR):
      logging.debug("Job %s is not yet done", job.id)
      return

    try:
      old = self._GetJobPath(job.id)
      new = self._GetArchivedJobPath(job.id)

      self._RenameFileUnlocked(old, new)

      logging.debug("Successfully archived job %s", job.id)
    finally:
      # Cleaning the cache because we don't know what os.rename actually did
      # and to be on the safe side.
      self._CleanCacheUnlocked([])

  def _GetJobInfoUnlocked(self, job, fields):
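    """Returns the values of the requested fields for a single job.

    """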
    row = []
    for fname in fields:
      if fname == "id":
        row.append(job.id)
      elif fname == "status":
        row.append(job.CalcStatus())
      elif fname == "ops":
        row.append([op.input.__getstate__() for op in job.ops])
      elif fname == "opresult":
        row.append([op.result for op in job.ops])
      elif fname == "opstatus":
        row.append([op.status for op in job.ops])
      elif fname == "ticker":
        ji = job.run_op_index
        if ji < 0:
          lmsg = None
        else:
          lmsg = job.ops[ji].RetrieveLog(-1)
          # message might be empty here
          if lmsg:
            lmsg = lmsg[0]
          else:
            lmsg = None
        row.append(lmsg)
      else:
        raise errors.OpExecError("Invalid job query field '%s'" % fname)
    return row

  @utils.LockedMethod
  @_RequireOpenQueue
  def QueryJobs(self, job_ids, fields):
    """Returns a list of jobs in queue.

    Args:
    - job_ids: Sequence of job identifiers or None for all
    - fields: Names of fields to return

    """
    jobs = []

    for job in self._GetJobsUnlocked(job_ids):
      if job is None:
        jobs.append(None)
      else:
        jobs.append(self._GetJobInfoUnlocked(job, fields))

    return jobs

  @utils.LockedMethod
  @_RequireOpenQueue
  def Shutdown(self):
    """Stops the job queue.

    """
    self._wpool.TerminateWorkers()

    self._queue_lock.Close()
    self._queue_lock = None