#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the job queue handling."""

import os
import logging
import threading
import errno
import re
import time

from ganeti import constants
from ganeti import serializer
from ganeti import workerpool
from ganeti import opcodes
from ganeti import errors
from ganeti import mcpu
from ganeti import utils
from ganeti import rpc


JOBQUEUE_THREADS = 5


class _QueuedOpCode(object):
  """Encasulates an opcode object.

  Access is synchronized by the '_lock' attribute.

  The 'log' attribute holds the execution log and consists of tuples
  of the form (timestamp, level, message).

  """
  def __init__(self, op):
    self.__Setup(op, constants.OP_STATUS_QUEUED, None, [])

  def __Setup(self, input_, status, result, log):
    self._lock = threading.Lock()
    self.input = input_
    self.status = status
    self.result = result
    self.log = log

  @classmethod
  def Restore(cls, state):
    obj = object.__new__(cls)
    obj.__Setup(opcodes.OpCode.LoadOpCode(state["input"]),
                state["status"], state["result"], state["log"])
    return obj

  @utils.LockedMethod
  def Serialize(self):
    return {
      "input": self.input.__getstate__(),
      "status": self.status,
      "result": self.result,
      "log": self.log,
      }
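
  # Note added for clarity (not in the original code): Serialize() and
  # Restore() are inverses.  The serialized form is a plain dict roughly like
  #   {"input": <opcode state dict>, "status": <an OP_STATUS_* value>,
  #    "result": None, "log": [(timestamp, level, message), ...]}
  # with the exact status and log level values coming from constants.py.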

  @utils.LockedMethod
  def GetInput(self):
    """Returns the original opcode.

    """
    return self.input

  @utils.LockedMethod
  def SetStatus(self, status, result):
    """Update the opcode status and result.

    """
    self.status = status
    self.result = result

  @utils.LockedMethod
  def GetStatus(self):
    """Get the opcode status.

    """
    return self.status

  @utils.LockedMethod
  def GetResult(self):
    """Get the opcode result.

    """
    return self.result

  @utils.LockedMethod
  def Log(self, *args):
    """Append a log entry.

    """
    assert len(args) in (1, 2)

    if len(args) == 1:
      log_type = constants.ELOG_MESSAGE
      log_msg = args[0]
    else:
      log_type, log_msg = args
    self.log.append((time.time(), log_type, log_msg))

  @utils.LockedMethod
  def RetrieveLog(self, start_at=0):
    """Retrieve (a part of) the execution log.

    """
    return self.log[start_at:]
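
  # Illustrative usage (added; not in the original code): Log() accepts either
  # a bare message or an explicit (type, message) pair, e.g.
  #   op.Log("copying disk data")
  #   op.Log(constants.ELOG_MESSAGE, "copying disk data")
  # and RetrieveLog(start_at=n) returns the entries appended from index n on.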


class _QueuedJob(object):
  """In-memory job representation.

  This is what we use to track the user-submitted jobs.

  """
  def __init__(self, storage, job_id, ops):
    if not ops:
      # TODO
      raise Exception("No opcodes")

    self.__Setup(storage, job_id, [_QueuedOpCode(op) for op in ops], -1)

  def __Setup(self, storage, job_id, ops, run_op_index):
    self._lock = threading.Lock()
    self.storage = storage
    self.id = job_id
    self._ops = ops
    self.run_op_index = run_op_index

  @classmethod
  def Restore(cls, storage, state):
    obj = object.__new__(cls)
    op_list = [_QueuedOpCode.Restore(op_state) for op_state in state["ops"]]
    obj.__Setup(storage, state["id"], op_list, state["run_op_index"])
    return obj

  def Serialize(self):
    return {
      "id": self.id,
      "ops": [op.Serialize() for op in self._ops],
      "run_op_index": self.run_op_index,
      }

  def SetUnclean(self, msg):
    try:
      for op in self._ops:
        op.SetStatus(constants.OP_STATUS_ERROR, msg)
    finally:
      self.storage.UpdateJob(self)

  def GetStatus(self):
    status = constants.JOB_STATUS_QUEUED

    all_success = True
    for op in self._ops:
      op_status = op.GetStatus()
      if op_status == constants.OP_STATUS_SUCCESS:
        continue

      all_success = False

      if op_status == constants.OP_STATUS_QUEUED:
        pass
      elif op_status == constants.OP_STATUS_RUNNING:
        status = constants.JOB_STATUS_RUNNING
      elif op_status == constants.OP_STATUS_ERROR:
        status = constants.JOB_STATUS_ERROR
        # The whole job fails if one opcode failed
        break

    if all_success:
      status = constants.JOB_STATUS_SUCCESS

    return status
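
  # Worked example (added; not in the original code): for opcode statuses
  # [SUCCESS, RUNNING, QUEUED] this returns JOB_STATUS_RUNNING; for
  # [SUCCESS, ERROR, QUEUED] it returns JOB_STATUS_ERROR and stops at the
  # first error; JOB_STATUS_SUCCESS is returned only when every opcode
  # succeeded.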

  @utils.LockedMethod
  def GetRunOpIndex(self):
    return self.run_op_index

  def Run(self, proc):
    """Job executor.

    This function processes this job in the context of the given
    processor instance.

    Args:
    - proc: Ganeti Processor to run the job with

    """
    try:
      count = len(self._ops)
      for idx, op in enumerate(self._ops):
        try:
          logging.debug("Op %s/%s: Starting %s", idx + 1, count, op)

          self._lock.acquire()
          try:
            self.run_op_index = idx
          finally:
            self._lock.release()

          op.SetStatus(constants.OP_STATUS_RUNNING, None)
          self.storage.UpdateJob(self)

          result = proc.ExecOpCode(op.input, op.Log)

          op.SetStatus(constants.OP_STATUS_SUCCESS, result)
          self.storage.UpdateJob(self)
          logging.debug("Op %s/%s: Successfully finished %s",
                        idx + 1, count, op)
        except Exception, err:
          try:
            op.SetStatus(constants.OP_STATUS_ERROR, str(err))
            logging.debug("Op %s/%s: Error in %s", idx + 1, count, op)
          finally:
            self.storage.UpdateJob(self)
          raise

    except errors.GenericError, err:
      logging.error("ganeti exception %s", err, exc_info=True)
    except Exception, err:
      logging.error("unhandled exception %s", err, exc_info=True)
    except:
      logging.error("unhandled unknown exception", exc_info=True)


class _JobQueueWorker(workerpool.BaseWorker):
  def RunTask(self, job):
    logging.debug("Worker %s processing job %s",
                  self.worker_id, job.id)
    # TODO: feedback function
    proc = mcpu.Processor(self.pool.context)
    try:
      job.Run(proc)
    finally:
      logging.debug("Worker %s finished job %s, status = %s",
                    self.worker_id, job.id, job.GetStatus())


class _JobQueueWorkerPool(workerpool.WorkerPool):
  def __init__(self, context):
    super(_JobQueueWorkerPool, self).__init__(JOBQUEUE_THREADS,
                                              _JobQueueWorker)
    self.context = context


class JobStorageBase(object):
  def __init__(self, id_prefix):
    self.id_prefix = id_prefix

    if id_prefix:
      prefix_pattern = re.escape("%s-" % id_prefix)
    else:
      prefix_pattern = ""

    # Apart from the prefix, all job IDs are numeric
    self._re_job_id = re.compile(r"^%s\d+$" % prefix_pattern)

  def OwnsJobId(self, job_id):
    return self._re_job_id.match(job_id)

  def FormatJobID(self, job_id):
    if not isinstance(job_id, (int, long)):
      raise errors.ProgrammerError("Job ID '%s' not numeric" % job_id)
    if job_id < 0:
      raise errors.ProgrammerError("Job ID %s is negative" % job_id)

    if self.id_prefix:
      prefix = "%s-" % self.id_prefix
    else:
      prefix = ""

    return "%s%010d" % (prefix, job_id)

  def _ShouldJobBeArchivedUnlocked(self, job):
    if job.GetStatus() not in (constants.JOB_STATUS_CANCELED,
                               constants.JOB_STATUS_SUCCESS,
                               constants.JOB_STATUS_ERROR):
      logging.debug("Job %s is not yet done", job.id)
      return False
    return True


class DiskJobStorage(JobStorageBase):
  _RE_JOB_FILE = re.compile(r"^job-(%s)$" % constants.JOB_ID_TEMPLATE)

  def __init__(self, id_prefix):
    JobStorageBase.__init__(self, id_prefix)

    self._lock = threading.Lock()
    self._memcache = {}
    self._my_hostname = utils.HostInfo().name

    # Make sure our directories exist
    for path in (constants.QUEUE_DIR, constants.JOB_QUEUE_ARCHIVE_DIR):
      try:
        os.mkdir(path, 0700)
      except OSError, err:
        if err.errno not in (errno.EEXIST, ):
          raise

    # Get queue lock
    self.lock_fd = open(constants.JOB_QUEUE_LOCK_FILE, "w")
    try:
      utils.LockFile(self.lock_fd)
    except:
      self.lock_fd.close()
      raise

    # Read version
    try:
      version_fd = open(constants.JOB_QUEUE_VERSION_FILE, "r")
    except IOError, err:
      if err.errno not in (errno.ENOENT, ):
        raise

      # Set up a new queue
      self._InitQueueUnlocked()

      # Try to open again
      version_fd = open(constants.JOB_QUEUE_VERSION_FILE, "r")

    try:
      # Try to read version
      version = int(version_fd.read(128))

      # Verify version
      if version != constants.JOB_QUEUE_VERSION:
        raise errors.JobQueueError("Found version %s, expected %s" %
                                   (version, constants.JOB_QUEUE_VERSION))
    finally:
      version_fd.close()

    self._last_serial = self._ReadSerial()
    if self._last_serial is None:
      raise errors.ConfigurationError("Can't read/parse the job queue serial"
                                      " file")

  @staticmethod
  def _ReadSerial():
    """Try to read the job serial file.

    @rtype: None or int
    @return: If the serial can be read, then it is returned. Otherwise None
             is returned.

    """
    try:
      serial_fd = open(constants.JOB_QUEUE_SERIAL_FILE, "r")
      try:
        # Read last serial
        serial = int(serial_fd.read(1024).strip())
      finally:
        serial_fd.close()
    except (ValueError, EnvironmentError):
      serial = None

    return serial

  def Close(self):
    assert self.lock_fd, "Queue should be open"

    self.lock_fd.close()
    self.lock_fd = None

  def _InitQueueUnlocked(self):
    assert self.lock_fd, "Queue should be open"

    utils.WriteFile(constants.JOB_QUEUE_VERSION_FILE,
                    data="%s\n" % constants.JOB_QUEUE_VERSION)
    if self._ReadSerial() is None:
      utils.WriteFile(constants.JOB_QUEUE_SERIAL_FILE,
                      data="%s\n" % 0)

  def _NewSerialUnlocked(self, nodes):
    """Generates a new job identifier.

    Job identifiers are unique during the lifetime of a cluster.

    Args:
    - nodes: the list of nodes to which the updated serial file is distributed

    Returns: A string representing the job identifier.

    """
    assert self.lock_fd, "Queue should be open"

    # New number
    serial = self._last_serial + 1

    # Write to file
    utils.WriteFile(constants.JOB_QUEUE_SERIAL_FILE,
                    data="%s\n" % serial)

    # Keep it only if we were able to write the file
    self._last_serial = serial

    # Distribute the serial to the other nodes
    try:
      nodes.remove(self._my_hostname)
    except ValueError:
      pass

    result = rpc.call_upload_file(nodes, constants.JOB_QUEUE_SERIAL_FILE)
    for node in nodes:
      if not result[node]:
        logging.error("copy of job queue file to node %s failed", node)

    return self.FormatJobID(serial)

  def _GetJobPath(self, job_id):
    return os.path.join(constants.QUEUE_DIR, "job-%s" % job_id)

  def _GetArchivedJobPath(self, job_id):
    return os.path.join(constants.JOB_QUEUE_ARCHIVE_DIR, "job-%s" % job_id)

  def _ExtractJobID(self, name):
    m = self._RE_JOB_FILE.match(name)
    if m:
      return m.group(1)
    else:
      return None

  def _GetJobIDsUnlocked(self, archived=False):
    """Return all known job IDs.

    If the parameter archived is True, archived job IDs will be
    included. Currently this argument is unused.

    The method only looks at disk because it's a requirement that all
    jobs are present on disk (so in the _memcache we don't have any
    extra IDs).

    """
    jlist = [self._ExtractJobID(name) for name in self._ListJobFiles()]
    jlist.sort()
    return jlist

  def _ListJobFiles(self):
    assert self.lock_fd, "Queue should be open"

    return [name for name in utils.ListVisibleFiles(constants.QUEUE_DIR)
            if self._RE_JOB_FILE.match(name)]

  def _LoadJobUnlocked(self, job_id):
    assert self.lock_fd, "Queue should be open"

    if job_id in self._memcache:
      logging.debug("Found job %s in memcache", job_id)
      return self._memcache[job_id]

    filepath = self._GetJobPath(job_id)
    logging.debug("Loading job from %s", filepath)
    try:
      fd = open(filepath, "r")
    except IOError, err:
      if err.errno in (errno.ENOENT, ):
        return None
      raise
    try:
      data = serializer.LoadJson(fd.read())
    finally:
      fd.close()

    job = _QueuedJob.Restore(self, data)
    self._memcache[job_id] = job
    logging.debug("Added job %s to the cache", job_id)
    return job

  def _GetJobsUnlocked(self, job_ids):
    if not job_ids:
      job_ids = self._GetJobIDsUnlocked()

    return [self._LoadJobUnlocked(job_id) for job_id in job_ids]

  @utils.LockedMethod
  def GetJobs(self, job_ids):
    return self._GetJobsUnlocked(job_ids)

  @utils.LockedMethod
  def AddJob(self, ops, nodes):
    """Create and store on disk a new job.

    @type ops: list
    @param ops: The list of OpCodes that will become the new job.
    @type nodes: list
    @param nodes: The list of nodes to which the new job serial will be
                  distributed.

    """
    assert self.lock_fd, "Queue should be open"

    # Get job identifier
    job_id = self._NewSerialUnlocked(nodes)
    job = _QueuedJob(self, job_id, ops)

    # Write to disk
    self._UpdateJobUnlocked(job)

    logging.debug("Added new job %s to the cache", job_id)
    self._memcache[job_id] = job

    return job

  def _UpdateJobUnlocked(self, job):
    assert self.lock_fd, "Queue should be open"

    filename = self._GetJobPath(job.id)
    logging.debug("Writing job %s to %s", job.id, filename)
    utils.WriteFile(filename,
                    data=serializer.DumpJson(job.Serialize(), indent=False))
    self._CleanCacheUnlocked([job.id])

  def _CleanCacheUnlocked(self, exclude):
    """Clean the memory cache.

    The exclude argument contains job IDs that should not be
    cleaned.

    """
    assert isinstance(exclude, list)
    for job in self._memcache.values():
      if job.id in exclude:
        continue
      if job.GetStatus() not in (constants.JOB_STATUS_QUEUED,
                                 constants.JOB_STATUS_RUNNING):
        logging.debug("Cleaning job %s from the cache", job.id)
        try:
          del self._memcache[job.id]
        except KeyError:
          pass

  @utils.LockedMethod
  def UpdateJob(self, job):
    return self._UpdateJobUnlocked(job)

  @utils.LockedMethod
  def ArchiveJob(self, job_id):
    """Archives a job.

    @type job_id: string
    @param job_id: Job ID of job to be archived.

    """
    logging.debug("Archiving job %s", job_id)

    job = self._LoadJobUnlocked(job_id)
    if not job:
      logging.debug("Job %s not found", job_id)
      return

    if not self._ShouldJobBeArchivedUnlocked(job):
      return

    try:
      old = self._GetJobPath(job.id)
      new = self._GetArchivedJobPath(job.id)

      os.rename(old, new)

      logging.debug("Successfully archived job %s", job.id)
    finally:
      # Cleaning the cache because we don't know what os.rename actually did
      # and to be on the safe side.
      self._CleanCacheUnlocked([])


class JobQueue:
  """The job queue.

  """
  def __init__(self, context):
    self._lock = threading.Lock()
    self._jobs = DiskJobStorage("")
    self._wpool = _JobQueueWorkerPool(context)

    for job in self._jobs.GetJobs(None):
      status = job.GetStatus()
      if status in (constants.JOB_STATUS_QUEUED, ):
        self._wpool.AddTask(job)

      elif status in (constants.JOB_STATUS_RUNNING, ):
        logging.warning("Unfinished job %s found: %s", job.id, job)
        job.SetUnclean("Unclean master daemon shutdown")

  @utils.LockedMethod
  def SubmitJob(self, ops, nodes):
    """Add a new job to the queue.

    This enters the job into our job queue and also adds it to the
    worker pool, in order for it to be picked up by the queue processors.

    @type ops: list
    @param ops: the sequence of opcodes that will become the new job
    @type nodes: list
    @param nodes: the list of nodes to which the queue should be
                  distributed

    """
    job = self._jobs.AddJob(ops, nodes)

    # Add to worker pool
    self._wpool.AddTask(job)

    return job.id
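
  # Illustrative usage (added; not in the original code): callers pass fully
  # built opcode objects plus the list of cluster node names, e.g.
  #   job_id = queue.SubmitJob([op], node_names)
  # where op is an opcodes.OpCode instance; the returned job ID can later be
  # passed to QueryJobs() or ArchiveJob().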

  def ArchiveJob(self, job_id):
    self._jobs.ArchiveJob(job_id)

  def CancelJob(self, job_id):
    raise NotImplementedError()

  def _GetJobInfo(self, job, fields):
    row = []
    for fname in fields:
      if fname == "id":
        row.append(job.id)
      elif fname == "status":
        row.append(job.GetStatus())
      elif fname == "ops":
        row.append([op.GetInput().__getstate__() for op in job._ops])
      elif fname == "opresult":
        row.append([op.GetResult() for op in job._ops])
      elif fname == "opstatus":
        row.append([op.GetStatus() for op in job._ops])
      elif fname == "ticker":
        ji = job.GetRunOpIndex()
        if ji < 0:
          lmsg = None
        else:
          lmsg = job._ops[ji].RetrieveLog(-1)
          # message might be empty here
          if lmsg:
            lmsg = lmsg[0]
          else:
            lmsg = None
        row.append(lmsg)
      else:
        raise errors.OpExecError("Invalid job query field '%s'" % fname)
    return row

  def QueryJobs(self, job_ids, fields):
    """Returns a list of jobs in queue.

    Args:
    - job_ids: Sequence of job identifiers or None for all
    - fields: Names of fields to return

    """
    self._lock.acquire()
    try:
      jobs = []

      for job in self._jobs.GetJobs(job_ids):
        if job is None:
          jobs.append(None)
        else:
          jobs.append(self._GetJobInfo(job, fields))

      return jobs
    finally:
      self._lock.release()
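
  # Illustrative usage (added; not in the original code): a simple status poll
  # could look like
  #   for row in queue.QueryJobs(None, ["id", "status"]):
  #     print row
  # where passing None for job_ids returns one row per known job.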

  @utils.LockedMethod
  def Shutdown(self):
    """Stops the job queue.

    """
    self._wpool.TerminateWorkers()
    self._jobs.Close()