{-# LANGUAGE TemplateHaskell #-}

{-| Implementation of the job queue.

-}

{-

Copyright (C) 2010, 2012 Google Inc.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.

-}

module Ganeti.JQueue
    ( QueuedOpCode(..)
    , QueuedJob(..)
    , InputOpCode(..)
    , queuedOpCodeFromMetaOpCode
    , queuedJobFromOpCodes
    , changeOpCodePriority
    , changeJobPriority
    , cancelQueuedJob
    , failQueuedJob
    , Timestamp
    , fromClockTime
    , noTimestamp
    , currentTimestamp
    , advanceTimestamp
    , setReceivedTimestamp
    , extendJobReasonTrail
    , getJobDependencies
    , opStatusFinalized
    , extractOpSummary
    , calcJobStatus
    , jobStarted
    , jobFinalized
    , jobArchivable
    , calcJobPriority
    , jobFileName
    , liveJobFile
    , archivedJobFile
    , determineJobDirectories
    , getJobIDs
    , sortJobIDs
    , loadJobFromDisk
    , noSuchJob
    , readSerialFromDisk
    , allocateJobIds
    , allocateJobId
    , writeJobToDisk
    , replicateManyJobs
    , writeAndReplicateJob
    , isQueueOpen
    , startJobs
    , cancelJob
    , queueDirPermissions
    , archiveJobs
    ) where

import Control.Applicative (liftA2, (<|>))
import Control.Arrow (first, second)
import Control.Concurrent (forkIO, threadDelay)
import Control.Concurrent.MVar
import Control.Exception
import Control.Monad
import Control.Monad.IO.Class
import Control.Monad.Trans (lift)
import Control.Monad.Trans.Maybe
import Data.Functor ((<$), (<$>))
import Data.List
import Data.Maybe
import Data.Ord (comparing)
-- workaround what seems to be a bug in ghc 7.4's TH shadowing code
import Prelude hiding (id, log)
import System.Directory
import System.FilePath
import System.IO.Error (isDoesNotExistError)
import System.Posix.Files
import System.Posix.Signals (sigTERM, signalProcess)
import System.Time
import qualified Text.JSON
import Text.JSON.Types

import Ganeti.BasicTypes
import qualified Ganeti.Config as Config
import qualified Ganeti.Constants as C
import Ganeti.Errors (ErrorResult, ResultG)
import Ganeti.JSON
import Ganeti.Logging
import Ganeti.Luxi
import Ganeti.Objects (ConfigData, Node)
import Ganeti.OpCodes
import Ganeti.Path
import Ganeti.Query.Exec as Exec
import Ganeti.Rpc (executeRpcCall, ERpcError, logRpcErrors,
                   RpcCallJobqueueUpdate(..), RpcCallJobqueueRename(..))
import Ganeti.THH
import Ganeti.THH.Field
import Ganeti.Types
import Ganeti.Utils
import Ganeti.Utils.Atomic
import Ganeti.Utils.Livelock (Livelock, isDead)
import Ganeti.VCluster (makeVirtualPath)

-- * Data types

-- | The ganeti queue timestamp type. It represents the time as the pair
-- of seconds since the epoch and microseconds since the beginning of the
-- second.
type Timestamp = (Int, Int)

-- | Missing timestamp type.
noTimestamp :: Timestamp
noTimestamp = (-1, -1)

-- | Obtain a Timestamp from a given clock time
fromClockTime :: ClockTime -> Timestamp
fromClockTime (TOD ctime pico) =
  (fromIntegral ctime, fromIntegral $ pico `div` 1000000)
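
-- A small illustrative example (doctest style): 250000000000 picoseconds
-- are 250000 microseconds, so
--
-- >>> fromClockTime (TOD 1500000000 250000000000)
-- (1500000000,250000)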

-- | Get the current time in the job-queue timestamp format.
currentTimestamp :: IO Timestamp
currentTimestamp = fromClockTime `liftM` getClockTime

-- | From a given timestamp, obtain the timestamp of the
-- time that is the given number of seconds later.
advanceTimestamp :: Int -> Timestamp -> Timestamp
advanceTimestamp = first . (+)
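
-- Illustrative example: only the seconds component is advanced,
--
-- >>> advanceTimestamp 10 (1500000000, 250000)
-- (1510000000,250000)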

-- | An input opcode.
data InputOpCode = ValidOpCode MetaOpCode -- ^ OpCode was parsed successfully
                 | InvalidOpCode JSValue  -- ^ Invalid opcode
                   deriving (Show, Eq)

-- | From an InputOpCode obtain the MetaOpCode, if any.
toMetaOpCode :: InputOpCode -> [MetaOpCode]
toMetaOpCode (ValidOpCode mopc) = [mopc]
toMetaOpCode _ = []

-- | JSON instance for 'InputOpCode', trying to parse it and if
-- failing, keeping the original JSValue.
instance Text.JSON.JSON InputOpCode where
  showJSON (ValidOpCode mo) = Text.JSON.showJSON mo
  showJSON (InvalidOpCode inv) = inv
  readJSON v = case Text.JSON.readJSON v of
                 Text.JSON.Error _ -> return $ InvalidOpCode v
                 Text.JSON.Ok mo -> return $ ValidOpCode mo

-- | Invalid opcode summary.
invalidOp :: String
invalidOp = "INVALID_OP"

-- | Tries to extract the opcode summary from an 'InputOpCode'. This
-- duplicates some functionality from the 'opSummary' function in
-- "Ganeti.OpCodes".
extractOpSummary :: InputOpCode -> String
extractOpSummary (ValidOpCode metaop) = opSummary $ metaOpCode metaop
extractOpSummary (InvalidOpCode (JSObject o)) =
  case fromObjWithDefault (fromJSObject o) "OP_ID" ("OP_" ++ invalidOp) of
    Just s -> drop 3 s -- drop the OP_ prefix
    Nothing -> invalidOp
extractOpSummary _ = invalidOp
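
-- Illustrative example: an invalid opcode that is not even a JSON object
-- falls back to the generic summary,
--
-- >>> extractOpSummary (InvalidOpCode JSNull)
-- "INVALID_OP"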

$(buildObject "QueuedOpCode" "qo"
  [ simpleField "input"           [t| InputOpCode |]
  , simpleField "status"          [t| OpStatus    |]
  , simpleField "result"          [t| JSValue     |]
  , defaultField [| [] |] $
    simpleField "log"             [t| [(Int, Timestamp, ELogType, JSValue)] |]
  , simpleField "priority"        [t| Int         |]
  , optionalNullSerField $
    simpleField "start_timestamp" [t| Timestamp   |]
  , optionalNullSerField $
    simpleField "exec_timestamp"  [t| Timestamp   |]
  , optionalNullSerField $
    simpleField "end_timestamp"   [t| Timestamp   |]
  ])

$(buildObject "QueuedJob" "qj"
  [ simpleField "id"                 [t| JobId          |]
  , simpleField "ops"                [t| [QueuedOpCode] |]
  , optionalNullSerField $
    simpleField "received_timestamp" [t| Timestamp      |]
  , optionalNullSerField $
    simpleField "start_timestamp"    [t| Timestamp      |]
  , optionalNullSerField $
    simpleField "end_timestamp"      [t| Timestamp      |]
  , optionalField $
    simpleField "livelock"           [t| FilePath      |]
  , optionalField $ processIdField "process_id"
  ])

-- | Convenience function to obtain a QueuedOpCode from a MetaOpCode
queuedOpCodeFromMetaOpCode :: MetaOpCode -> QueuedOpCode
queuedOpCodeFromMetaOpCode op =
  QueuedOpCode { qoInput = ValidOpCode op
               , qoStatus = OP_STATUS_QUEUED
               , qoPriority = opSubmitPriorityToRaw . opPriority . metaParams
                              $ op
               , qoLog = []
               , qoResult = JSNull
               , qoStartTimestamp = Nothing
               , qoEndTimestamp = Nothing
               , qoExecTimestamp = Nothing
               }

-- | From a job-id and a list of op-codes create a job. This is
-- the pure part of job creation, as allocating a new job id
-- lives in IO.
queuedJobFromOpCodes :: (Monad m) => JobId -> [MetaOpCode] -> m QueuedJob
queuedJobFromOpCodes jobid ops = do
  ops' <- mapM (`resolveDependencies` jobid) ops
  return QueuedJob { qjId = jobid
                   , qjOps = map queuedOpCodeFromMetaOpCode ops'
                   , qjReceivedTimestamp = Nothing
                   , qjStartTimestamp = Nothing
                   , qjEndTimestamp = Nothing
                   , qjLivelock = Nothing
                   , qjProcessId = Nothing
                   }

-- | Attach a received timestamp to a Queued Job.
setReceivedTimestamp :: Timestamp -> QueuedJob -> QueuedJob
setReceivedTimestamp ts job = job { qjReceivedTimestamp = Just ts }

-- | Build a timestamp in the format expected by the reason trail (nanoseconds)
-- starting from a JQueue Timestamp.
reasonTrailTimestamp :: Timestamp -> Integer
reasonTrailTimestamp (sec, micro) =
  let sec' = toInteger sec
      micro' = toInteger micro
  in sec' * 1000000000 + micro' * 1000
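
-- Illustrative example: 2 seconds and 500000 microseconds correspond to
-- 2500000000 nanoseconds,
--
-- >>> reasonTrailTimestamp (2, 500000)
-- 2500000000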

-- | Append an element to the reason trail of an input opcode.
extendInputOpCodeReasonTrail :: JobId -> Timestamp -> Int -> InputOpCode
                             -> InputOpCode
extendInputOpCodeReasonTrail _ _ _ op@(InvalidOpCode _) = op
extendInputOpCodeReasonTrail jid ts i (ValidOpCode vOp) =
  let metaP = metaParams vOp
      op = metaOpCode vOp
      trail = opReason metaP
      reasonSrc = opReasonSrcID op
      reasonText = "job=" ++ show (fromJobId jid) ++ ";index=" ++ show i
      reason = (reasonSrc, reasonText, reasonTrailTimestamp ts)
      trail' = trail ++ [reason]
  in ValidOpCode $ vOp { metaParams = metaP { opReason = trail' } }

-- | Append an element to the reason trail of a queued opcode.
extendOpCodeReasonTrail :: JobId -> Timestamp -> Int -> QueuedOpCode
                        -> QueuedOpCode
extendOpCodeReasonTrail jid ts i op =
  let inOp = qoInput op
  in op { qoInput = extendInputOpCodeReasonTrail jid ts i inOp }

-- | Append an element to the reason trail of all the OpCodes of a queued job.
extendJobReasonTrail :: QueuedJob -> QueuedJob
extendJobReasonTrail job =
  let jobId = qjId job
      mTimestamp = qjReceivedTimestamp job
      -- This function is going to be called on QueuedJobs that already contain
      -- a timestamp. But for safety reasons we cannot assume mTimestamp will
      -- be (Just timestamp), so we use the value 0 in the extremely unlikely
      -- case this is not true.
      timestamp = fromMaybe (0, 0) mTimestamp
    in job
        { qjOps =
            zipWith (extendOpCodeReasonTrail jobId timestamp) [0..] $
              qjOps job
        }

-- | From a queued job obtain the list of jobs it depends on.
getJobDependencies :: QueuedJob -> [JobId]
getJobDependencies job = do
  op <- qjOps job
  mopc <- toMetaOpCode $ qoInput op
  dep <- fromMaybe [] . opDepends $ metaParams mopc
  getJobIdFromDependency dep
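
-- Note that the definition above works in the list monad: for every opcode
-- of the job and for every entry of its 'opDepends' field, the job ids are
-- collected via 'getJobIdFromDependency'; opcodes without dependencies
-- contribute nothing.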

-- | Change the priority of a QueuedOpCode, if it is not already
-- finalized.
changeOpCodePriority :: Int -> QueuedOpCode -> QueuedOpCode
changeOpCodePriority prio op =
  if qoStatus op > OP_STATUS_RUNNING
     then op
     else op { qoPriority = prio }

-- | Set the state of a QueuedOpCode to canceled.
cancelOpCode :: Timestamp -> QueuedOpCode -> QueuedOpCode
cancelOpCode now op =
  op { qoStatus = OP_STATUS_CANCELED, qoEndTimestamp = Just now }

-- | Change the priority of a job, i.e., change the priority of the
-- non-finalized opcodes.
changeJobPriority :: Int -> QueuedJob -> QueuedJob
changeJobPriority prio job =
  job { qjOps = map (changeOpCodePriority prio) $ qjOps job }

-- | Transform a QueuedJob that has not been started into its canceled form.
cancelQueuedJob :: Timestamp -> QueuedJob -> QueuedJob
cancelQueuedJob now job =
  let ops' = map (cancelOpCode now) $ qjOps job
  in job { qjOps = ops', qjEndTimestamp = Just now }

-- | Set the state of a QueuedOpCode to failed.
failOpCode :: Timestamp -> QueuedOpCode -> QueuedOpCode
failOpCode now op =
  op { qoStatus = OP_STATUS_ERROR, qoEndTimestamp = Just now }

-- | Transform a QueuedJob that has not been started into its failed form.
failQueuedJob :: Timestamp -> QueuedJob -> QueuedJob
failQueuedJob now job =
  -- TODO: Add a reason trail message
  let ops' = map (failOpCode now) $ qjOps job
  in job { qjOps = ops', qjEndTimestamp = Just now }

-- | Job file prefix.
jobFilePrefix :: String
jobFilePrefix = "job-"

-- | Computes the filename for a given job ID.
jobFileName :: JobId -> FilePath
jobFileName jid = jobFilePrefix ++ show (fromJobId jid)

-- | Parses a job ID from a file name.
parseJobFileId :: (Monad m) => FilePath -> m JobId
parseJobFileId path =
  case stripPrefix jobFilePrefix path of
    Nothing -> fail $ "Job file '" ++ path ++
                      "' doesn't have the correct prefix"
    Just suffix -> makeJobIdS suffix
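
-- As an illustration, the job with numeric id 42 is stored in a file named
-- "job-42", and 'parseJobFileId' recovers the id from exactly that name.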

-- | Computes the full path to a live job.
liveJobFile :: FilePath -> JobId -> FilePath
liveJobFile rootdir jid = rootdir </> jobFileName jid

-- | Computes the full path to an archived job.
archivedJobFile :: FilePath -> JobId -> FilePath
archivedJobFile rootdir jid =
  let subdir = show (fromJobId jid `div` C.jstoreJobsPerArchiveDirectory)
  in rootdir </> jobQueueArchiveSubDir </> subdir </> jobFileName jid

-- | Map from opcode status to job status.
opStatusToJob :: OpStatus -> JobStatus
opStatusToJob OP_STATUS_QUEUED    = JOB_STATUS_QUEUED
opStatusToJob OP_STATUS_WAITING   = JOB_STATUS_WAITING
opStatusToJob OP_STATUS_SUCCESS   = JOB_STATUS_SUCCESS
opStatusToJob OP_STATUS_RUNNING   = JOB_STATUS_RUNNING
opStatusToJob OP_STATUS_CANCELING = JOB_STATUS_CANCELING
opStatusToJob OP_STATUS_CANCELED  = JOB_STATUS_CANCELED
opStatusToJob OP_STATUS_ERROR     = JOB_STATUS_ERROR

-- | Computes a queued job's status.
calcJobStatus :: QueuedJob -> JobStatus
calcJobStatus QueuedJob { qjOps = ops } =
  extractOpSt (map qoStatus ops) JOB_STATUS_QUEUED True
    where
      terminalStatus OP_STATUS_ERROR     = True
      terminalStatus OP_STATUS_CANCELING = True
      terminalStatus OP_STATUS_CANCELED  = True
      terminalStatus _                   = False
      softStatus     OP_STATUS_SUCCESS   = True
      softStatus     OP_STATUS_QUEUED    = True
      softStatus     _                   = False
      extractOpSt [] _ True = JOB_STATUS_SUCCESS
      extractOpSt [] d False = d
      extractOpSt (x:xs) d old_all
           | terminalStatus x = opStatusToJob x -- abort recursion
           | softStatus x     = extractOpSt xs d new_all -- continue unchanged
           | otherwise        = extractOpSt xs (opStatusToJob x) new_all
           where new_all = x == OP_STATUS_SUCCESS && old_all
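
-- As an illustration of the rules above: opcode statuses
-- [OP_STATUS_SUCCESS, OP_STATUS_RUNNING, OP_STATUS_QUEUED] yield
-- JOB_STATUS_RUNNING, [OP_STATUS_SUCCESS, OP_STATUS_ERROR, OP_STATUS_QUEUED]
-- yield JOB_STATUS_ERROR (a terminal status aborts the scan), and a job
-- whose opcodes are all OP_STATUS_SUCCESS is JOB_STATUS_SUCCESS.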

-- | Determine if a job has started
jobStarted :: QueuedJob -> Bool
jobStarted = (> JOB_STATUS_QUEUED) . calcJobStatus

-- | Determine if a job is finalised.
jobFinalized :: QueuedJob -> Bool
jobFinalized = (> JOB_STATUS_RUNNING) . calcJobStatus

-- | Determine if a job is finalized and its timestamp is before
-- a given time.
jobArchivable :: Timestamp -> QueuedJob -> Bool
jobArchivable ts = liftA2 (&&) jobFinalized
  $ maybe False (< ts)
    .  liftA2 (<|>) qjEndTimestamp qjStartTimestamp
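
-- In other words, the job must be finalized and its end timestamp (or, if
-- that is missing, its start timestamp) must lie before the given cut-off
-- time; a job with neither timestamp is never considered archivable.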

-- | Determine whether an opcode status is finalized.
opStatusFinalized :: OpStatus -> Bool
opStatusFinalized = (> OP_STATUS_RUNNING)

-- | Compute a job's priority.
calcJobPriority :: QueuedJob -> Int
calcJobPriority QueuedJob { qjOps = ops } =
  helper . map qoPriority $ filter (not . opStatusFinalized . qoStatus) ops
    where helper [] = C.opPrioDefault
          helper ps = minimum ps
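
-- As an illustration, if the non-finalized opcodes of a job carry the
-- priorities [40, 10, 70], the job priority is 10 (the numerically smallest
-- value); if no non-finalized opcodes remain, 'C.opPrioDefault' is used.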

-- | Log but ignore an 'IOError'.
ignoreIOError :: a -> Bool -> String -> IOError -> IO a
ignoreIOError a ignore_noent msg e = do
  unless (isDoesNotExistError e && ignore_noent) .
    logWarning $ msg ++ ": " ++ show e
  return a

-- | Compute the list of existing archive directories. Note that I/O
-- exceptions are swallowed and ignored.
allArchiveDirs :: FilePath -> IO [FilePath]
allArchiveDirs rootdir = do
  let adir = rootdir </> jobQueueArchiveSubDir
  contents <- getDirectoryContents adir `Control.Exception.catch`
               ignoreIOError [] False
                 ("Failed to list queue directory " ++ adir)
  let fpaths = map (adir </>) $ filter (not . ("." `isPrefixOf`)) contents
  filterM (\path ->
             liftM isDirectory (getFileStatus (adir </> path))
               `Control.Exception.catch`
               ignoreIOError False True
                 ("Failed to stat archive path " ++ path)) fpaths

-- | Build list of directories containing job files. Note: compared to
-- the Python version, this doesn't ignore a potential lost+found
-- file.
determineJobDirectories :: FilePath -> Bool -> IO [FilePath]
determineJobDirectories rootdir archived = do
  other <- if archived
             then allArchiveDirs rootdir
             else return []
  return $ rootdir:other

-- | Computes the list of all jobs in the given directories.
getJobIDs :: [FilePath] -> IO (GenericResult IOError [JobId])
getJobIDs = runResultT . liftM concat . mapM getDirJobIDs

-- | Sorts a list of job IDs.
sortJobIDs :: [JobId] -> [JobId]
sortJobIDs = sortBy (comparing fromJobId)

-- | Computes the list of jobs in a given directory.
getDirJobIDs :: FilePath -> ResultT IOError IO [JobId]
getDirJobIDs path =
  withErrorLogAt WARNING ("Failed to list job directory " ++ path) .
    liftM (mapMaybe parseJobFileId) $ liftIO (getDirectoryContents path)

-- | Reads the job data from disk.
readJobDataFromDisk :: FilePath -> Bool -> JobId -> IO (Maybe (String, Bool))
readJobDataFromDisk rootdir archived jid = do
  let live_path = liveJobFile rootdir jid
      archived_path = archivedJobFile rootdir jid
      all_paths = if archived
                    then [(live_path, False), (archived_path, True)]
                    else [(live_path, False)]
  foldM (\state (path, isarchived) ->
           liftM (\r -> Just (r, isarchived)) (readFile path)
             `Control.Exception.catch`
             ignoreIOError state True
               ("Failed to read job file " ++ path)) Nothing all_paths

-- | Failed to load job error.
noSuchJob :: Result (QueuedJob, Bool)
noSuchJob = Bad "Can't load job file"

-- | Loads a job from disk.
loadJobFromDisk :: FilePath -> Bool -> JobId -> IO (Result (QueuedJob, Bool))
loadJobFromDisk rootdir archived jid = do
  raw <- readJobDataFromDisk rootdir archived jid
  -- note: we need some strictness below, otherwise the wrapping in a
  -- Result will create too much laziness, and not close the file
  -- descriptors for the individual jobs
  return $! case raw of
             Nothing -> noSuchJob
             Just (str, arch) ->
               liftM (\qj -> (qj, arch)) .
               fromJResult "Parsing job file" $ Text.JSON.decode str

-- | Write a job to disk.
writeJobToDisk :: FilePath -> QueuedJob -> IO (Result ())
writeJobToDisk rootdir job = do
  let filename = liveJobFile rootdir . qjId $ job
      content = Text.JSON.encode . Text.JSON.showJSON $ job
  tryAndLogIOError (atomicWriteFile filename content)
                   ("Failed to write " ++ filename) Ok

-- | Replicate a job to all master candidates.
replicateJob :: FilePath -> [Node] -> QueuedJob -> IO [(Node, ERpcError ())]
replicateJob rootdir mastercandidates job = do
  let filename = liveJobFile rootdir . qjId $ job
      content = Text.JSON.encode . Text.JSON.showJSON $ job
  filename' <- makeVirtualPath filename
  callresult <- executeRpcCall mastercandidates
                  $ RpcCallJobqueueUpdate filename' content
  let result = map (second (() <$)) callresult
  _ <- logRpcErrors result
  return result

-- | Replicate many jobs to all master candidates.
replicateManyJobs :: FilePath -> [Node] -> [QueuedJob] -> IO ()
replicateManyJobs rootdir mastercandidates =
  mapM_ (replicateJob rootdir mastercandidates)

-- | Writes a job to a file and replicates it to master candidates.
writeAndReplicateJob :: (Error e)
                     => ConfigData -> FilePath -> QueuedJob
                     -> ResultT e IO [(Node, ERpcError ())]
writeAndReplicateJob cfg rootdir job = do
  mkResultT $ writeJobToDisk rootdir job
  liftIO $ replicateJob rootdir (Config.getMasterCandidates cfg) job

-- | Read the job serial number from disk.
readSerialFromDisk :: IO (Result JobId)
readSerialFromDisk = do
  filename <- jobQueueSerialFile
  tryAndLogIOError (readFile filename) "Failed to read serial file"
                   (makeJobIdS . rStripSpace)

-- | Allocate new job ids.
-- To avoid races while accessing the serial file, the threads synchronize
-- over a lock, provided as usual by an MVar.
allocateJobIds :: [Node] -> MVar () -> Int -> IO (Result [JobId])
allocateJobIds mastercandidates lock n =
  if n <= 0
    then return . Bad $ "Can only allocate positive number of job ids"
    else do
      takeMVar lock
      rjobid <- readSerialFromDisk
      case rjobid of
        Bad s -> do
          putMVar lock ()
          return . Bad $ s
        Ok jid -> do
          let current = fromJobId jid
              serial_content = show (current + n) ++  "\n"
          serial <- jobQueueSerialFile
          write_result <- try $ atomicWriteFile serial serial_content
                          :: IO (Either IOError ())
          case write_result of
            Left e -> do
              putMVar lock ()
              let msg = "Failed to write serial file: " ++ show e
              logError msg
              return . Bad $ msg
            Right () -> do
              serial' <- makeVirtualPath serial
              _ <- executeRpcCall mastercandidates
                     $ RpcCallJobqueueUpdate serial' serial_content
              putMVar lock ()
              return $ mapM makeJobId [(current+1)..(current+n)]
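
-- As an illustration, if the serial file currently holds 7 and three job ids
-- are requested, the file is rewritten to contain 10 and the ids 8, 9 and 10
-- are returned (provided the serial file can be written).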

-- | Allocate one new job id.
allocateJobId :: [Node] -> MVar () -> IO (Result JobId)
allocateJobId mastercandidates lock = do
  jids <- allocateJobIds mastercandidates lock 1
  return (jids >>= monadicThe "Failed to allocate precisely one Job ID")

-- | Decide whether the job queue is open.
isQueueOpen :: IO Bool
isQueueOpen = liftM not (jobQueueDrainFile >>= doesFileExist)

-- | Start enqueued jobs by executing the Python code.
startJobs :: ConfigData
          -> Livelock -- ^ Luxi's livelock path
          -> [QueuedJob] -- ^ the list of jobs to start
          -> IO [ErrorResult QueuedJob]
startJobs cfg luxiLivelock jobs = do
  qdir <- queueDir
  let updateJob job llfile =
        void . writeAndReplicateJob cfg qdir $ job { qjLivelock = Just llfile }
  let runJob job = do
        (llfile, _) <- Exec.forkJobProcess (qjId job) luxiLivelock
                                           (updateJob job)
        return $ job { qjLivelock = Just llfile }
  mapM (runResultT . runJob) jobs

-- | Waits for a job to finalize its execution.
waitForJob :: JobId -> Int -> ResultG (Bool, String)
waitForJob jid tmout = do
  qDir <- liftIO queueDir
  let jobfile = liveJobFile qDir jid
      load = liftM fst <$> loadJobFromDisk qDir False jid
  jobR <- liftIO $ watchFileBy jobfile tmout
                               (genericResult (const False) jobFinalized) load
  case calcJobStatus <$> jobR of
    Ok s | s == JOB_STATUS_CANCELED ->
             return (True, "Job successfully cancelled")
         | otherwise ->
             return (False, "Job exited with status " ++ show s)
    Bad e -> failError $ "Can't read job status: " ++ e

-- | Try to cancel a job that has already been handed over to execution,
-- by terminating the process.
cancelJob :: JobId -> IO (ErrorResult (Bool, String))
cancelJob jid = runResultT $ do
  -- we can't terminate the job if it's just being started, so
  -- retry several times in such a case
  result <- runMaybeT . msum . flip map [0..5 :: Int] $ \tryNo -> do
    -- if we're retrying, sleep for some time
    when (tryNo > 0) . liftIO . threadDelay $ 100000 * (2 ^ tryNo)

    -- first check if the job is alive so that we don't kill some other
    -- process by accident
    qDir <- liftIO queueDir
    (job, _) <- lift . mkResultT $ loadJobFromDisk qDir True jid
    let jName = ("Job " ++) . show . fromJobId . qjId $ job
    dead <- maybe (return False) (liftIO . isDead) (qjLivelock job)
    case qjProcessId job of
      _ | dead ->
        return (True, jName ++ " is already dead")
      Just pid -> do
        liftIO $ signalProcess sigTERM pid
        lift $ waitForJob jid C.luxiCancelJobTimeout
      _ -> do
        logDebug $ jName ++ " in its startup phase, retrying"
        mzero
  return $ fromMaybe (False, "Timeout: job still in its startup phase") result

-- | Permissions for the archive directories.
queueDirPermissions :: FilePermissions
queueDirPermissions = FilePermissions { fpOwner = Just C.masterdUser
                                      , fpGroup = Just C.daemonsGroup
                                      , fpPermissions = 0o0750
                                      }

-- | Try, at most until the given endtime, to archive some of the given
-- jobs, if they are older than the specified cut-off time; also replicate
-- archival of the additional jobs. Return the pair of the number of jobs
-- archived and the number of jobs remaining in the queue, assuming the
-- given counts for the jobs not considered.
archiveSomeJobsUntil :: ([JobId] -> IO ()) -- ^ replication function
                        -> FilePath -- ^ queue root directory
                        -> ClockTime -- ^ Endtime
                        -> Timestamp -- ^ cut-off time for archiving jobs
                        -> Int -- ^ number of jobs already archived
                        -> [JobId] -- ^ Additional jobs to replicate
                        -> [JobId] -- ^ List of job-ids still to consider
                        -> IO (Int, Int)
archiveSomeJobsUntil replicateFn _ _ _ arch torepl [] = do
  unless (null torepl) . (>> return ())
   . forkIO $ replicateFn torepl
  return (arch, 0)

archiveSomeJobsUntil replicateFn qDir endt cutt arch torepl (jid:jids) = do
  let archiveMore = archiveSomeJobsUntil replicateFn qDir endt cutt
      continue = archiveMore arch torepl jids
      jidname = show $ fromJobId jid
  time <- getClockTime
  if time >= endt
    then do
      _ <- forkIO $ replicateFn torepl
      return (arch, length (jid:jids))
    else do
      logDebug $ "Inspecting job " ++ jidname ++ " for archival"
      loadResult <- loadJobFromDisk qDir False jid
      case loadResult of
        Bad _ -> continue
        Ok (job, _) ->
          if jobArchivable cutt job
            then do
              let live = liveJobFile qDir jid
                  archive = archivedJobFile qDir jid
              renameResult <- safeRenameFile queueDirPermissions
                                live archive
              case renameResult of
                Bad s -> do
                  logWarning $ "Renaming " ++ live ++ " to " ++ archive
                                 ++ " failed unexpectedly: " ++ s
                  continue
                Ok () -> do
                  let torepl' = jid:torepl
                  if length torepl' >= 10
                    then do
                      _ <- forkIO $ replicateFn torepl'
                      archiveMore (arch + 1) [] jids
                    else archiveMore (arch + 1) torepl' jids
            else continue

-- | Archive jobs older than the given time, but do not exceed the timeout for
-- carrying out this task.
archiveJobs :: ConfigData -- ^ cluster configuration
               -> Int  -- ^ time the job has to be in the past in order
                       -- to be archived
               -> Int -- ^ timeout
               -> [JobId] -- ^ jobs to consider
               -> IO (Int, Int)
archiveJobs cfg age timeout jids = do
  now <- getClockTime
  qDir <- queueDir
  let endtime = addToClockTime (noTimeDiff { tdSec = timeout }) now
      cuttime = if age < 0 then noTimestamp
                           else advanceTimestamp (- age) (fromClockTime now)
      mcs = Config.getMasterCandidates cfg
      replicateFn jobs = do
        let olds = map (liveJobFile qDir) jobs
            news = map (archivedJobFile qDir) jobs
        _ <- executeRpcCall mcs . RpcCallJobqueueRename $ zip olds news
        return ()
  archiveSomeJobsUntil replicateFn qDir endtime cuttime 0 [] jids