#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the job queue handling."""

import os
import logging
import threading
import errno
import re
import time

from ganeti import constants
from ganeti import serializer
from ganeti import workerpool
from ganeti import opcodes
from ganeti import errors
from ganeti import mcpu
from ganeti import utils
from ganeti import rpc


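# Number of worker threads started by the job queue's worker pool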
JOBQUEUE_THREADS = 5


class _QueuedOpCode(object):
  """Encasulates an opcode object.

  Access is synchronized by the '_lock' attribute.

  The 'log' attribute holds the execution log and consists of tuples
  of the form (timestamp, level, message).

  """
  def __init__(self, op):
    self.__Setup(op, constants.OP_STATUS_QUEUED, None, [])

  def __Setup(self, input_, status, result, log):
    self._lock = threading.Lock()
    self.input = input_
    self.status = status
    self.result = result
    self.log = log

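  # Restore() is the counterpart of Serialize(): it rebuilds an opcode
  # wrapper from the dictionary stored on disk, bypassing __init__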
  @classmethod
  def Restore(cls, state):
    obj = object.__new__(cls)
    obj.__Setup(opcodes.OpCode.LoadOpCode(state["input"]),
                state["status"], state["result"], state["log"])
    return obj

  @utils.LockedMethod
  def Serialize(self):
    return {
      "input": self.input.__getstate__(),
      "status": self.status,
      "result": self.result,
      "log": self.log,
      }

  @utils.LockedMethod
  def GetInput(self):
    """Returns the original opcode.

    """
    return self.input

  @utils.LockedMethod
  def SetStatus(self, status, result):
    """Update the opcode status and result.

    """
    self.status = status
    self.result = result

  @utils.LockedMethod
  def GetStatus(self):
    """Get the opcode status.

    """
    return self.status

  @utils.LockedMethod
  def GetResult(self):
    """Get the opcode result.

    """
    return self.result

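  # Log() accepts either a single message (recorded as ELOG_MESSAGE) or
  # an explicit (log_type, message) pair; every entry is timestamped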
  @utils.LockedMethod
  def Log(self, *args):
    """Append a log entry.

    """
    assert len(args) in (1, 2)

    if len(args) == 1:
      log_type = constants.ELOG_MESSAGE
      log_msg = args[0]
    else:
      log_type, log_msg = args
    self.log.append((time.time(), log_type, log_msg))

  @utils.LockedMethod
  def RetrieveLog(self, start_at=0):
    """Retrieve (a part of) the execution log.

    """
    return self.log[start_at:]


class _QueuedJob(object):
  """In-memory job representation.

  This is what we use to track the user-submitted jobs.

  """
  def __init__(self, storage, job_id, ops):
    if not ops:
      # TODO
      raise Exception("No opcodes")

    self.__Setup(storage, job_id, [_QueuedOpCode(op) for op in ops], -1)

  def __Setup(self, storage, job_id, ops, run_op_index):
    self._lock = threading.Lock()
    self.storage = storage
    self.id = job_id
    self._ops = ops
    self.run_op_index = run_op_index

  @classmethod
  def Restore(cls, storage, state):
    obj = object.__new__(cls)
    op_list = [_QueuedOpCode.Restore(op_state) for op_state in state["ops"]]
    obj.__Setup(storage, state["id"], op_list, state["run_op_index"])
    return obj

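  # The serialized form written to disk is a JSON document, roughly:
  #   {"id": "0000000042", "run_op_index": -1,
  #    "ops": [{"input": <opcode state dict>, "status": <op status>,
  #             "result": null, "log": []}]}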
  def Serialize(self):
    return {
      "id": self.id,
      "ops": [op.Serialize() for op in self._ops],
      "run_op_index": self.run_op_index,
      }

  def _SetStatus(self, status, msg):
    try:
      for op in self._ops:
        op.SetStatus(status, msg)
    finally:
      self.storage.UpdateJob(self)

  def SetUnclean(self, msg):
    return self._SetStatus(constants.OP_STATUS_ERROR, msg)

  def SetCanceled(self, msg):
    return self._SetStatus(constants.OP_STATUS_CANCELED, msg)

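  # The overall job status is derived from the opcode statuses: a failed
  # or canceled opcode determines the whole job's status, a running
  # opcode makes the job "running", and the job is successful only if
  # every opcode succeeded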
  def GetStatus(self):
    status = constants.JOB_STATUS_QUEUED

    all_success = True
    for op in self._ops:
      op_status = op.GetStatus()
      if op_status == constants.OP_STATUS_SUCCESS:
        continue

      all_success = False

      if op_status == constants.OP_STATUS_QUEUED:
        pass
      elif op_status == constants.OP_STATUS_RUNNING:
        status = constants.JOB_STATUS_RUNNING
      elif op_status == constants.OP_STATUS_ERROR:
        status = constants.JOB_STATUS_ERROR
        # The whole job fails if one opcode failed
        break
      elif op_status == constants.OP_STATUS_CANCELED:
        status = constants.JOB_STATUS_CANCELED
        break

    if all_success:
      status = constants.JOB_STATUS_SUCCESS

    return status

  @utils.LockedMethod
  def GetRunOpIndex(self):
    return self.run_op_index

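  # Run() below persists the job through self.storage after every opcode
  # status change, so the on-disk copy always reflects current progress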
  def Run(self, proc):
    """Job executor.

    This function processes this job in the context of the given
    processor instance.

    Args:
    - proc: Ganeti Processor to run the job with

    """
    try:
      count = len(self._ops)
      for idx, op in enumerate(self._ops):
        try:
          logging.debug("Op %s/%s: Starting %s", idx + 1, count, op)

          self._lock.acquire()
          try:
            self.run_op_index = idx
          finally:
            self._lock.release()

          op.SetStatus(constants.OP_STATUS_RUNNING, None)
          self.storage.UpdateJob(self)

          result = proc.ExecOpCode(op.input, op.Log)

          op.SetStatus(constants.OP_STATUS_SUCCESS, result)
          self.storage.UpdateJob(self)
          logging.debug("Op %s/%s: Successfully finished %s",
                        idx + 1, count, op)
        except Exception, err:
          try:
            op.SetStatus(constants.OP_STATUS_ERROR, str(err))
            logging.debug("Op %s/%s: Error in %s", idx + 1, count, op)
          finally:
            self.storage.UpdateJob(self)
          raise

    except errors.GenericError, err:
      logging.exception("ganeti exception %s", err)
    except Exception, err:
      logging.exception("unhandled exception %s", err)
    except:
      logging.exception("unhandled unknown exception")


class _JobQueueWorker(workerpool.BaseWorker):
  def RunTask(self, job):
    logging.debug("Worker %s processing job %s",
                  self.worker_id, job.id)
    # TODO: feedback function
    proc = mcpu.Processor(self.pool.context)
    try:
      job.Run(proc)
    finally:
      logging.debug("Worker %s finished job %s, status = %s",
                    self.worker_id, job.id, job.GetStatus())


class _JobQueueWorkerPool(workerpool.WorkerPool):
  def __init__(self, context):
    super(_JobQueueWorkerPool, self).__init__(JOBQUEUE_THREADS,
                                              _JobQueueWorker)
    self.context = context


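# Job IDs handed out by the storage classes consist of an optional
# "<prefix>-" part followed by the serial number zero-padded to ten
# digits, e.g. "0000000042" (see FormatJobID below)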
class JobStorageBase(object):
  def __init__(self, id_prefix):
    self.id_prefix = id_prefix

    if id_prefix:
      prefix_pattern = re.escape("%s-" % id_prefix)
    else:
      prefix_pattern = ""

    # Apart from the prefix, all job IDs are numeric
    self._re_job_id = re.compile(r"^%s\d+$" % prefix_pattern)

  def OwnsJobId(self, job_id):
    return self._re_job_id.match(job_id)

  def FormatJobID(self, job_id):
    if not isinstance(job_id, (int, long)):
      raise errors.ProgrammerError("Job ID '%s' not numeric" % job_id)
    if job_id < 0:
      raise errors.ProgrammerError("Job ID %s is negative" % job_id)

    if self.id_prefix:
      prefix = "%s-" % self.id_prefix
    else:
      prefix = ""

    return "%s%010d" % (prefix, job_id)

  def _ShouldJobBeArchivedUnlocked(self, job):
    if job.GetStatus() not in (constants.JOB_STATUS_CANCELED,
                               constants.JOB_STATUS_SUCCESS,
                               constants.JOB_STATUS_ERROR):
      logging.debug("Job %s is not yet done", job.id)
      return False
    return True


class DiskJobStorage(JobStorageBase):
  _RE_JOB_FILE = re.compile(r"^job-(%s)$" % constants.JOB_ID_TEMPLATE)

  def __init__(self, id_prefix):
    JobStorageBase.__init__(self, id_prefix)

    self._lock = threading.Lock()
    self._memcache = {}
    self._my_hostname = utils.HostInfo().name

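    # The queue lives on disk: the queue directory itself, an archive
    # directory, a lock file, a version file and a serial file holding
    # the last job ID handed out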
    # Make sure our directories exist
    for path in (constants.QUEUE_DIR, constants.JOB_QUEUE_ARCHIVE_DIR):
      try:
        os.mkdir(path, 0700)
      except OSError, err:
        if err.errno not in (errno.EEXIST, ):
          raise

    # Get queue lock
    self.lock_fd = open(constants.JOB_QUEUE_LOCK_FILE, "w")
    try:
      utils.LockFile(self.lock_fd)
    except:
      self.lock_fd.close()
      raise

    # Read version
    try:
      version_fd = open(constants.JOB_QUEUE_VERSION_FILE, "r")
    except IOError, err:
      if err.errno not in (errno.ENOENT, ):
        raise

      # Setup a new queue
      self._InitQueueUnlocked()

      # Try to open again
      version_fd = open(constants.JOB_QUEUE_VERSION_FILE, "r")

    try:
      # Try to read version
      version = int(version_fd.read(128))

      # Verify version
      if version != constants.JOB_QUEUE_VERSION:
        raise errors.JobQueueError("Found version %s, expected %s" %
                                   (version, constants.JOB_QUEUE_VERSION))
    finally:
      version_fd.close()

    self._last_serial = self._ReadSerial()
    if self._last_serial is None:
      raise errors.ConfigurationError("Can't read/parse the job queue serial"
                                      " file")

  @staticmethod
  def _ReadSerial():
    """Try to read the job serial file.

    @rtype: None or int
    @return: If the serial can be read, then it is returned. Otherwise None
             is returned.

    """
    try:
      serial_fd = open(constants.JOB_QUEUE_SERIAL_FILE, "r")
      try:
        # Read last serial
        serial = int(serial_fd.read(1024).strip())
      finally:
        serial_fd.close()
    except (ValueError, EnvironmentError):
      serial = None

    return serial

  def Close(self):
    assert self.lock_fd, "Queue should be open"

    self.lock_fd.close()
    self.lock_fd = None

  def _InitQueueUnlocked(self):
    assert self.lock_fd, "Queue should be open"

    utils.WriteFile(constants.JOB_QUEUE_VERSION_FILE,
                    data="%s\n" % constants.JOB_QUEUE_VERSION)
    if self._ReadSerial() is None:
      utils.WriteFile(constants.JOB_QUEUE_SERIAL_FILE,
                      data="%s\n" % 0)

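  # The serial counter is incremented locally, written back to the serial
  # file and then copied to the given nodes via rpc.call_upload_file so
  # all of them agree on the last job ID handed out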
  def _NewSerialUnlocked(self, nodes):
    """Generates a new job identifier.

    Job identifiers are unique during the lifetime of a cluster.

    Returns: A string representing the job identifier.

    """
    assert self.lock_fd, "Queue should be open"

    # New number
    serial = self._last_serial + 1

    # Write to file
    utils.WriteFile(constants.JOB_QUEUE_SERIAL_FILE,
                    data="%s\n" % serial)

    # Keep it only if we were able to write the file
    self._last_serial = serial

    # Distribute the serial to the other nodes
    try:
      nodes.remove(self._my_hostname)
    except ValueError:
      pass

    result = rpc.call_upload_file(nodes, constants.JOB_QUEUE_SERIAL_FILE)
    for node in nodes:
      if not result[node]:
        logging.error("copy of job queue file to node %s failed", node)

    return self.FormatJobID(serial)

  def _GetJobPath(self, job_id):
    return os.path.join(constants.QUEUE_DIR, "job-%s" % job_id)

  def _GetArchivedJobPath(self, job_id):
    return os.path.join(constants.JOB_QUEUE_ARCHIVE_DIR, "job-%s" % job_id)

  def _ExtractJobID(self, name):
    m = self._RE_JOB_FILE.match(name)
    if m:
      return m.group(1)
    else:
      return None

  def _GetJobIDsUnlocked(self, archived=False):
    """Return all known job IDs.

    If the parameter archived is True, archived job IDs will be
    included. Currently this argument is unused.

    The method only looks at disk because it's a requirement that all
    jobs are present on disk (so in the _memcache we don't have any
    extra IDs).

    """
    jlist = [self._ExtractJobID(name) for name in self._ListJobFiles()]
    jlist.sort()
    return jlist

  def _ListJobFiles(self):
    assert self.lock_fd, "Queue should be open"

    return [name for name in utils.ListVisibleFiles(constants.QUEUE_DIR)
            if self._RE_JOB_FILE.match(name)]

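  # Loaded jobs are kept in self._memcache; _CleanCacheUnlocked evicts
  # finished jobs again so the cache only holds queued and running jobs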
  def _LoadJobUnlocked(self, job_id):
    assert self.lock_fd, "Queue should be open"

    if job_id in self._memcache:
      logging.debug("Found job %s in memcache", job_id)
      return self._memcache[job_id]

    filepath = self._GetJobPath(job_id)
    logging.debug("Loading job from %s", filepath)
    try:
      fd = open(filepath, "r")
    except IOError, err:
      if err.errno in (errno.ENOENT, ):
        return None
      raise
    try:
      data = serializer.LoadJson(fd.read())
    finally:
      fd.close()

    job = _QueuedJob.Restore(self, data)
    self._memcache[job_id] = job
    logging.debug("Added job %s to the cache", job_id)
    return job

  def _GetJobsUnlocked(self, job_ids):
    if not job_ids:
      job_ids = self._GetJobIDsUnlocked()

    return [self._LoadJobUnlocked(job_id) for job_id in job_ids]

  @utils.LockedMethod
  def GetJobs(self, job_ids):
    return self._GetJobsUnlocked(job_ids)

  @utils.LockedMethod
  def AddJob(self, ops, nodes):
    """Create and store on disk a new job.

    @type ops: list
    @param ops: The list of OpCodes that will become the new job.
    @type nodes: list
    @param nodes: The list of nodes to which the new job serial will be
                  distributed.

    """
    assert self.lock_fd, "Queue should be open"

    # Get job identifier
    job_id = self._NewSerialUnlocked(nodes)
    job = _QueuedJob(self, job_id, ops)

    # Write to disk
    self._UpdateJobUnlocked(job)

    logging.debug("Added new job %s to the cache", job_id)
    self._memcache[job_id] = job

    return job

  def _UpdateJobUnlocked(self, job):
    assert self.lock_fd, "Queue should be open"

    filename = self._GetJobPath(job.id)
    logging.debug("Writing job %s to %s", job.id, filename)
    utils.WriteFile(filename,
                    data=serializer.DumpJson(job.Serialize(), indent=False))
    self._CleanCacheUnlocked([job.id])

  def _CleanCacheUnlocked(self, exclude):
    """Clean the memory cache.

    The exclude argument contains job IDs that should not be
    cleaned.

    """
    assert isinstance(exclude, list)
    for job in self._memcache.values():
      if job.id in exclude:
        continue
      if job.GetStatus() not in (constants.JOB_STATUS_QUEUED,
                                 constants.JOB_STATUS_RUNNING):
        logging.debug("Cleaning job %s from the cache", job.id)
        try:
          del self._memcache[job.id]
        except KeyError:
          pass

  @utils.LockedMethod
  def UpdateJob(self, job):
    return self._UpdateJobUnlocked(job)

  @utils.LockedMethod
  def ArchiveJob(self, job_id):
    """Archives a job.

    @type job_id: string
    @param job_id: Job ID of job to be archived.

    """
    logging.debug("Archiving job %s", job_id)

    job = self._LoadJobUnlocked(job_id)
    if not job:
      logging.debug("Job %s not found", job_id)
      return

    if not self._ShouldJobBeArchivedUnlocked(job):
      return

    try:
      old = self._GetJobPath(job.id)
      new = self._GetArchivedJobPath(job.id)

      os.rename(old, new)

      logging.debug("Successfully archived job %s", job.id)
    finally:
      # Cleaning the cache because we don't know what os.rename actually did
      # and to be on the safe side.
      self._CleanCacheUnlocked([])


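# A minimal usage sketch (hypothetical caller; "context" is whatever
# object mcpu.Processor expects and "op_list" is a list of
# opcodes.OpCode instances):
#
#   queue = JobQueue(context)
#   job_id = queue.SubmitJob(op_list, nodes=[])
#   print queue.QueryJobs([job_id], ["id", "status"])
#   queue.Shutdown()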
class JobQueue:
  """The job queue.

  """
  def __init__(self, context):
    self._lock = threading.Lock()
    self._jobs = DiskJobStorage("")
    self._wpool = _JobQueueWorkerPool(context)

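    # On startup, re-queue jobs that were still queued when the previous
    # master daemon went down; jobs found "running" cannot be resumed and
    # are marked as having ended uncleanly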
    for job in self._jobs.GetJobs(None):
      status = job.GetStatus()
      if status in (constants.JOB_STATUS_QUEUED, ):
        self._wpool.AddTask(job)

      elif status in (constants.JOB_STATUS_RUNNING, ):
        logging.warning("Unfinished job %s found: %s", job.id, job)
        job.SetUnclean("Unclean master daemon shutdown")

  @utils.LockedMethod
  def SubmitJob(self, ops, nodes):
    """Add a new job to the queue.

    This enters the job into our job queue and also adds it to the
    worker pool, in order for it to be picked up by the queue processors.

    @type ops: list
    @param ops: the sequence of opcodes that will become the new job
    @type nodes: list
    @param nodes: the list of nodes to which the queue should be
                  distributed

    """
    job = self._jobs.AddJob(ops, nodes)

    # Add to worker pool
    self._wpool.AddTask(job)

    return job.id

  def ArchiveJob(self, job_id):
    self._jobs.ArchiveJob(job_id)

  def CancelJob(self, job_id):
    raise NotImplementedError()

  def _GetJobInfo(self, job, fields):
    row = []
    for fname in fields:
      if fname == "id":
        row.append(job.id)
      elif fname == "status":
        row.append(job.GetStatus())
      elif fname == "ops":
        row.append([op.GetInput().__getstate__() for op in job._ops])
      elif fname == "opresult":
        row.append([op.GetResult() for op in job._ops])
      elif fname == "opstatus":
        row.append([op.GetStatus() for op in job._ops])
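      # "ticker" returns the last log entry of the currently running
      # opcode, or None if no opcode has started yet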
      elif fname == "ticker":
        ji = job.GetRunOpIndex()
        if ji < 0:
          lmsg = None
        else:
          lmsg = job._ops[ji].RetrieveLog(-1)
          # message might be empty here
          if lmsg:
            lmsg = lmsg[0]
          else:
            lmsg = None
        row.append(lmsg)
      else:
        raise errors.OpExecError("Invalid job query field '%s'" % fname)
    return row

  def QueryJobs(self, job_ids, fields):
    """Returns a list of jobs in queue.

    Args:
    - job_ids: Sequence of job identifiers or None for all
    - fields: Names of fields to return

    """
    self._lock.acquire()
    try:
      jobs = []

      for job in self._jobs.GetJobs(job_ids):
        if job is None:
          jobs.append(None)
        else:
          jobs.append(self._GetJobInfo(job, fields))

      return jobs
    finally:
      self._lock.release()

  @utils.LockedMethod
  def Shutdown(self):
    """Stops the job queue.

    """
    self._wpool.TerminateWorkers()
    self._jobs.Close()