Commit 525bfb36 authored by Iustin Pop
Browse files

htools: docstring fixes and improvements



No code changes (except one definition being moved around in QC.hs)
are contained in this patch.
Signed-off-by: Iustin Pop <iustin@google.com>
Reviewed-by: Michael Hanselmann <hansmi@google.com>
parent 606e71d3
{-| Implementation of command-line functions.
This module holds the common cli-related functions for the binaries,
separated into this module since Utils.hs is used in many other places
and this is more IO oriented.
This module holds the common command-line related functions for the
binaries, separated into this module since "Ganeti.HTools.Utils" is
used in many other places and this is more IO oriented.
-}
......@@ -89,10 +89,16 @@ import qualified Ganeti.Constants as C
import Ganeti.HTools.Types
import Ganeti.HTools.Utils
-- | The default value for the luxi socket
-- * Constants
-- | The default value for the luxi socket path.
--
-- This is re-exported from the "Ganeti.Constants" module (as
-- 'C.masterSocket'), so callers only need this module.
defaultLuxiSocket :: FilePath
defaultLuxiSocket = C.masterSocket
-- * Data types
-- | Command line options structure.
data Options = Options
{ optDataFile :: Maybe FilePath -- ^ Path to the cluster data file
......@@ -170,9 +176,11 @@ defaultOptions = Options
, optVerbose = 1
}
-- | Abrreviation for the option type
-- | Abbreviation for the option type.
--
-- The processing function returns a 'Result', since parsing an
-- option's argument can fail.
type OptType = OptDescr (Options -> Result Options)
-- * Command line options
oDataFile :: OptType
oDataFile = Option "t" ["text-data"]
(ReqArg (\ f o -> Ok o { optDataFile = Just f }) "FILE")
......@@ -394,13 +402,15 @@ oVerbose = Option "v" ["verbose"]
(NoArg (\ opts -> Ok opts { optVerbose = optVerbose opts + 1 }))
"increase the verbosity level"
-- | Usage info
-- * Functions
-- | Formats the usage/help message for a binary, given its name
-- and the list of command line options it supports.
usageHelp :: String -> [OptType] -> String
usageHelp progname options =
  usageInfo header options
  where header = printf "%s %s\nUsage: %s [OPTION...]"
                 progname Version.version progname
-- | Command line parser, using the 'options' structure.
-- | Command line parser, using the 'Options' structure.
parseOpts :: [String] -- ^ The command line arguments
-> String -- ^ The program name
-> [OptType] -- ^ The supported command line options
......
{-| Implementation of cluster-wide logic.
This module holds all pure cluster-logic; I\/O related functionality
goes into the "Main" module for the individual binaries.
goes into the /Main/ module for the individual binaries.
-}
......@@ -106,17 +106,18 @@ type AllocResult = (FailStats, Node.List, Instance.List,
-- | A type denoting the valid allocation mode/pairs.
--
-- For a one-node allocation, this will be a @Left ['Node.Node']@,
-- whereas for a two-node allocation, this will be a @Right
-- [('Node.Node', 'Node.Node')]@.
type AllocNodes = Either [Ndx] [(Ndx, Ndx)]
-- | The empty solution we start with when computing allocations
-- | The empty allocation solution, used as the starting value when
-- computing allocations.
emptySolution :: AllocSolution
emptySolution =
  AllocSolution { asFailures = []
                , asAllocs = 0
                , asSolutions = []
                , asLog = []
                }
-- | The complete state for the balancing solution
-- | The complete state for the balancing solution.
data Table = Table Node.List Instance.List Score [Placement]
deriving (Show, Read)
......@@ -144,7 +145,7 @@ data CStats = CStats { csFmem :: Integer -- ^ Cluster free mem
}
deriving (Show, Read)
-- | Currently used, possibly to allocate, unallocable
-- | Currently used, possibly to allocate, unallocable.
type AllocStats = (RSpec, RSpec, RSpec)
-- * Utility functions
......@@ -170,11 +171,11 @@ computeBadItems nl il =
in
(bad_nodes, bad_instances)
-- | Zero-initializer for the CStats type
-- | Zero-initializer for the CStats type.
--
-- All nineteen counters start at zero; 'updateCStats' then folds
-- per-node data into this value.
emptyCStats :: CStats
emptyCStats = CStats 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-- | Update stats with data from a new node
-- | Update stats with data from a new node.
updateCStats :: CStats -> Node.Node -> CStats
updateCStats cs node =
let CStats { csFmem = x_fmem, csFdsk = x_fdsk,
......@@ -243,7 +244,7 @@ computeAllocationDelta cini cfin =
(truncate t_dsk - fromIntegral f_idsk)
in (rini, rfin, runa)
-- | The names and weights of the individual elements in the CV list
-- | The names and weights of the individual elements in the CV list.
detailedCVInfo :: [(Double, String)]
detailedCVInfo = [ (1, "free_mem_cv")
, (1, "free_disk_cv")
......@@ -311,11 +312,11 @@ compDetailedCV nl =
-- | Compute the compound cluster score for a node list, as the
-- weighted sum of the detailed CV metrics.
compCV :: Node.List -> Double
compCV nl = sum $ zipWith (*) detailedCVWeights (compDetailedCV nl)
-- | Compute online nodes from a Node.List
-- | Compute the list of online nodes from a 'Node.List'.
getOnline :: Node.List -> [Node.Node]
getOnline nl = [ node | node <- Container.elems nl
                      , not (Node.offline node) ]
-- * hbal functions
-- * Balancing functions
-- | Compute best table. Note that the ordering of the arguments is important.
compareTables :: Table -> Table -> Table
......@@ -534,7 +535,7 @@ checkMove nodes_idx disk_moves inst_moves ini_tbl victims =
then ini_tbl -- no advancement
else best_tbl
-- | Check if we are allowed to go deeper in the balancing
-- | Check if we are allowed to go deeper in the balancing.
doNextBalance :: Table -- ^ The starting table
-> Int -- ^ Remaining length
-> Score -- ^ Score at which to stop
......@@ -544,7 +545,7 @@ doNextBalance ini_tbl max_rounds min_score =
ini_plc_len = length ini_plc
in (max_rounds < 0 || ini_plc_len < max_rounds) && ini_cv > min_score
-- | Run a balance move
-- | Run a balance move.
tryBalance :: Table -- ^ The starting table
-> Bool -- ^ Allow disk moves
-> Bool -- ^ Allow instance moves
......@@ -574,13 +575,13 @@ tryBalance ini_tbl disk_moves inst_moves evac_mode mg_limit min_gain =
-- * Allocation functions
-- | Build failure stats out of a list of failures
-- | Build failure stats out of a list of failure reasons, counting
-- how many times each possible failure mode occurred.
collapseFailures :: [FailMode] -> FailStats
collapseFailures flst =
  [ (fmode, length (filter (== fmode) flst))
  | fmode <- [minBound..maxBound] ]
-- | Update current Allocation solution and failure stats with new
-- elements
-- elements.
concatAllocs :: AllocSolution -> OpResult Node.AllocElement -> AllocSolution
concatAllocs as (OpFail reason) = as { asFailures = reason : asFailures as }
......@@ -611,7 +612,7 @@ sumAllocs :: AllocSolution -> AllocSolution -> AllocSolution
sumAllocs (AllocSolution af aa as al) (AllocSolution bf ba bs bl) =
AllocSolution (af ++ bf) (aa + ba) (as ++ bs) (al ++ bl)
-- | Given a solution, generates a reasonable description for it
-- | Given a solution, generates a reasonable description for it.
describeSolution :: AllocSolution -> String
describeSolution as =
let fcnt = asFailures as
......@@ -629,7 +630,7 @@ describeSolution as =
" for node(s) %s") cv (asAllocs as) (length fcnt) freasons
(intercalate "/" . map Node.name $ nodes)
-- | Annotates a solution with the appropriate string
-- | Annotates a solution, prepending its own description
-- (via 'describeSolution') to its log.
annotateSolution :: AllocSolution -> AllocSolution
annotateSolution sol =
  sol { asLog = describeSolution sol : asLog sol }
......@@ -678,7 +679,7 @@ tryAlloc nl _ inst (Left all_nodes) =
then fail "No online nodes"
else return $ annotateSolution sols
-- | Given a group/result, describe it as a nice (list of) messages
-- | Given a group/result, describe it as a nice (list of) messages.
solutionDescription :: Group.List -> (Gdx, Result AllocSolution) -> [String]
solutionDescription gl (groupId, result) =
case result of
......@@ -690,7 +691,7 @@ solutionDescription gl (groupId, result) =
-- | From a list of possibly bad and possibly empty solutions, filter
-- only the groups with a valid result. Note that the result will be
-- reversed compared to the original list
-- reversed compared to the original list.
filterMGResults :: Group.List
-> [(Gdx, Result AllocSolution)]
-> [(Gdx, AllocSolution)]
......@@ -703,7 +704,7 @@ filterMGResults gl = foldl' fn []
| unallocable gdx -> accu
| otherwise -> (gdx, sol):accu
-- | Sort multigroup results based on policy and score
-- | Sort multigroup results based on policy and score.
sortMGResults :: Group.List
-> [(Gdx, AllocSolution)]
-> [(Gdx, AllocSolution)]
......@@ -782,7 +783,7 @@ tryMGReloc _ mgnl mgil xid ncount ex_ndx = do
Just v -> return v
tryReloc nl il xid ncount ex_ndx
-- | Change an instance's secondary node
-- | Change an instance's secondary node.
evacInstance :: (Monad m) =>
[Ndx] -- ^ Excluded nodes
-> Instance.List -- ^ The current instance list
......@@ -854,7 +855,7 @@ tryMGEvac _ nl il ex_ndx =
let sol = foldl' sumAllocs emptySolution results
return $ annotateSolution sol
-- | Recursively place instances on the cluster until we're out of space
-- | Recursively place instances on the cluster until we're out of space.
iterateAlloc :: Node.List
-> Instance.List
-> Instance.Instance
......@@ -879,7 +880,7 @@ iterateAlloc nl il newinst allocnodes ixes cstats =
_ -> Bad "Internal error: multiple solutions for single\
\ allocation"
-- | The core of the tiered allocation mode
-- | The core of the tiered allocation mode.
tieredAlloc :: Node.List
-> Instance.List
-> Instance.Instance
......@@ -1072,7 +1073,7 @@ iMoveToJob nl il idx move =
-- * Node group functions
-- | Computes the group of an instance
-- | Computes the group of an instance.
instanceGroup :: Node.List -> Instance.Instance -> Result Gdx
instanceGroup nl i =
let sidx = Instance.sNode i
......@@ -1087,19 +1088,19 @@ instanceGroup nl i =
show pgroup ++ ", secondary " ++ show sgroup)
else return pgroup
-- | Computes the group of an instance per the primary node
-- | Computes the group of an instance per the primary node.
instancePriGroup :: Node.List -> Instance.Instance -> Gdx
instancePriGroup nl inst =
  Node.group (Container.find (Instance.pNode inst) nl)
-- | Compute the list of badly allocated instances (split across node
-- groups)
-- groups).
findSplitInstances :: Node.List -> Instance.List -> [Instance.Instance]
findSplitInstances nl il =
  filter (not . isOk . instanceGroup nl) (Container.elems il)
-- | Splits a cluster into the component node groups
-- | Splits a cluster into the component node groups.
splitCluster :: Node.List -> Instance.List ->
[(Gdx, (Node.List, Instance.List))]
splitCluster nl il =
......
{-| External data loader
{-| External data loader.
This module holds the external data loading, and thus is the only one
depending (via the specialized Text\/Rapi\/Luxi modules) on the actual
......@@ -51,7 +51,7 @@ import Ganeti.HTools.Types
import Ganeti.HTools.CLI
import Ganeti.HTools.Utils (sepSplit, tryRead)
-- | Error beautifier
-- | Error beautifier: turns any exception raised by the wrapped IO
-- action into a 'Bad' result holding the exception's string form.
wrapIO :: IO (Result a) -> IO (Result a)
wrapIO action = catch action (return . Bad . show)
......
......@@ -60,7 +60,7 @@ parseBaseInstance n a = do
let running = "running"
return (n, Instance.create n mem disk vcpus running tags True 0 0)
-- | Parses an instance as found in the cluster instance listg.
-- | Parses an instance as found in the cluster instance list.
parseInstance :: NameAssoc -- ^ The node name-to-index association list
-> String -- ^ The name of the instance
-> JSRecord -- ^ The JSON object
......
......@@ -49,7 +49,7 @@ import qualified Ganeti.Constants as C
-- * Type declarations
-- | The instance type
-- | The instance type.
data Instance = Instance
{ name :: String -- ^ The instance name
, alias :: String -- ^ The shortened name
......@@ -74,7 +74,7 @@ instance T.Element Instance where
setIdx = setIdx
allNames n = [name n, alias n]
-- | Running instance states.
-- | Constant holding the running instance states.
runningStates :: [String]
runningStates = [C.inststRunning, C.inststErrorup]
......
{-| Generic data loader
{-| Generic data loader.
This module holds the common code for parsing the input data after it
has been loaded from external sources.
......@@ -54,7 +54,7 @@ import Ganeti.HTools.Types
-- * Constants
-- | The exclusion tag prefix
-- | The exclusion tag prefix.
exTagsPrefix :: String
exTagsPrefix = "htools:iextags:"
......@@ -147,7 +147,7 @@ fixNodes accu inst =
in Container.add sdx snew ac2
else ac2
-- | Remove non-selected tags from the exclusion list
-- | Remove non-selected tags from the exclusion list.
filterExTags :: [String] -> Instance.Instance -> Instance.Instance
filterExTags tl inst =
let old_tags = Instance.tags inst
......@@ -155,7 +155,7 @@ filterExTags tl inst =
old_tags
in inst { Instance.tags = new_tags }
-- | Update the movable attribute
-- | Update the movable attribute.
updateMovable :: [String] -- ^ Selected instances (if not empty)
-> [String] -- ^ Excluded instances
-> Instance.Instance -- ^ Target Instance
......@@ -168,7 +168,7 @@ updateMovable selinsts exinsts inst =
else inst
-- | Compute the longest common suffix of a list of strings that
-- | starts with a dot.
-- starts with a dot.
longestDomain :: [String] -> String
longestDomain [] = ""
longestDomain (x:xs) =
......@@ -177,13 +177,13 @@ longestDomain (x:xs) =
else accu)
"" $ filter (isPrefixOf ".") (tails x)
-- | Extracts the exclusion tags from the cluster configuration
-- | Extracts the exclusion tags from the cluster configuration:
-- keeps only the tags starting with 'exTagsPrefix', with that
-- prefix stripped off.
extractExTags :: [String] -> [String]
extractExTags ctags =
  [ drop plen tag | tag <- ctags, exTagsPrefix `isPrefixOf` tag ]
  where plen = length exTagsPrefix
-- | Extracts the common suffix from node\/instance names
-- | Extracts the common suffix from node\/instance names.
commonSuffix :: Node.List -> Instance.List -> String
commonSuffix nl il =
let node_names = map Node.name $ Container.elems nl
......
......@@ -187,7 +187,7 @@ parseGroup v = fail ("Invalid group query result: " ++ show v)
-- * Main loader functionality
-- | Builds the cluster data from an URL.
-- | Builds the cluster data by querying a given socket name.
readData :: String -- ^ Unix socket to use as source
-> IO (Result JSValue, Result JSValue, Result JSValue, Result JSValue)
readData master =
......@@ -202,6 +202,8 @@ readData master =
return (groups, nodes, instances, cinfo)
)
-- | Converts the output of 'readData' into the internal cluster
-- representation.
parseData :: (Result JSValue, Result JSValue, Result JSValue, Result JSValue)
-> Result ClusterData
parseData (groups, nodes, instances, cinfo) = do
......@@ -214,7 +216,7 @@ parseData (groups, nodes, instances, cinfo) = do
ctags <- cinfo >>= getClusterTags
return (ClusterData group_idx node_idx inst_idx ctags)
-- | Top level function for data loading
-- | Top level function for data loading.
loadData :: String -- ^ Unix socket to use as source
-> IO (Result ClusterData)
loadData = fmap parseData . readData
......@@ -85,7 +85,7 @@ import qualified Ganeti.HTools.Types as T
-- * Type declarations
-- | The tag map type
-- | The tag map type.
type TagMap = Map.Map String Int
-- | The node type.
......@@ -140,7 +140,7 @@ type AssocList = [(T.Ndx, Node)]
type List = Container.Container Node
-- | A simple name for an allocation element (here just for logistic
-- reasons)
-- reasons).
type AllocElement = (List, Instance.Instance, [Node], T.Score)
-- | Constant node index for a non-moveable instance.
......@@ -149,26 +149,26 @@ noSecondary = -1
-- * Helper functions
-- | Add a tag to a tagmap
-- | Add a tag to a tagmap, bumping its count by one.
addTag :: TagMap -> String -> TagMap
addTag tmap tag = Map.insertWith (+) tag 1 tmap
-- | Add multiple tags
-- | Add multiple tags to a tagmap.
addTags :: TagMap -> [String] -> TagMap
addTags tmap tags = foldl' addTag tmap tags
-- | Adjust or delete a tag from a tagmap
-- | Adjust or delete a tag from a tagmap: decrement the tag's
-- count, dropping the entry entirely once it would reach zero.
delTag :: TagMap -> String -> TagMap
delTag tmap tag =
  let decrement v
        | v > 1     = Just (v - 1)
        | otherwise = Nothing
  in Map.update decrement tag tmap
-- | Remove multiple tags
-- | Remove multiple tags from a tagmap.
delTags :: TagMap -> [String] -> TagMap
delTags tmap tags = foldl' delTag tmap tags
-- | Check if we can add a list of tags to a tagmap
-- | Check whether adding a list of tags to a tagmap should be
-- rejected, i.e. whether any of them is already present.
rejectAddTags :: TagMap -> [String] -> Bool
rejectAddTags tmap = any (\tag -> Map.member tag tmap)
......@@ -221,11 +221,11 @@ create name_init mem_t_init mem_n_init mem_f_init
, group = group_init
}
-- | Conversion formula from mDsk\/tDsk to loDsk
-- | Conversion formula from mDsk\/tDsk to loDsk: the minimum-disk
-- ratio applied to the total disk, rounded down.
mDskToloDsk :: Double -> Double -> Int
mDskToloDsk ratio total = floor (ratio * total)
-- | Conversion formula from mCpu\/tCpu to hiCpu
-- | Conversion formula from mCpu\/tCpu to hiCpu: the maximum-cpu
-- ratio applied to the total cpu count, rounded down.
mCpuTohiCpu :: Double -> Double -> Int
mCpuTohiCpu ratio total = floor (ratio * total)
......@@ -249,11 +249,11 @@ setOffline t val = t { offline = val }
setXmem :: Node -> Int -> Node
setXmem t val = t { xMem = val }
-- | Sets the max disk usage ratio
-- | Sets the max disk usage ratio, recomputing the derived
-- low-disk threshold ('loDsk') from it.
setMdsk :: Node -> Double -> Node
setMdsk node val =
  node { mDsk = val, loDsk = mDskToloDsk val (tDsk node) }
-- | Sets the max cpu usage ratio
-- | Sets the max cpu usage ratio, recomputing the derived
-- high-cpu threshold ('hiCpu') from it.
setMcpu :: Node -> Double -> Node
setMcpu node val =
  node { mCpu = val, hiCpu = mCpuTohiCpu val (tCpu node) }
......@@ -435,7 +435,7 @@ addSecEx force t inst pdx =
-- * Stats functions
-- | Computes the amount of available disk on a given node
-- | Computes the amount of available disk on a given node.
availDisk :: Node -> Int
availDisk t =
let _f = fDsk t
......@@ -444,11 +444,11 @@ availDisk t =
then 0
else _f - _l
-- | Computes the amount of used disk on a given node
-- | Computes the amount of used disk on a given node
-- (total minus free).
iDsk :: Node -> Int
iDsk node = truncate (tDsk node) - fDsk node
-- | Computes the amount of available memory on a given node
-- | Computes the amount of available memory on a given node.
availMem :: Node -> Int
availMem t =
let _f = fMem t
......@@ -457,7 +457,7 @@ availMem t =
then 0
else _f - _l
-- | Computes the amount of available memory on a given node
-- | Computes the amount of available CPU on a given node.
availCpu :: Node -> Int
availCpu t =
let _u = uCpu t
......@@ -472,7 +472,10 @@ iMem t = truncate (tMem t) - nMem t - xMem t - fMem t
-- * Display functions
showField :: Node -> String -> String
-- | Return a field for a given node.
showField :: Node -- ^ Node which we're querying
-> String -- ^ Field name
-> String -- ^ Field value as string
showField t field =
case field of
"idx" -> printf "%4d" $ idx t
......@@ -512,7 +515,7 @@ showField t field =
T.DynUtil { T.cpuWeight = uC, T.memWeight = uM,
T.dskWeight = uD, T.netWeight = uN } = utilLoad t
-- | Returns the header and numeric propery of a field
-- | Returns the header and numeric property of a field.
showHeader :: String -> (String, Bool)
showHeader field =
case field of
......@@ -552,6 +555,7 @@ list :: [String] -> Node -> [String]
list fields t = map (showField t) fields
-- | Constant holding the fields we're displaying by default.
defaultFields :: [String]
defaultFields =
[ "status", "name", "tmem", "nmem", "imem", "xmem", "fmem"
......@@ -560,7 +564,7 @@ defaultFields =
, "cload", "mload", "dload", "nload" ]
-- | Split a list of nodes into a list of (node group UUID, list of
-- associated nodes).
computeGroups :: [Node] -> [(T.Gdx, [Node])]
computeGroups nodes =
let nodes' = sortBy (comparing group) nodes
......
{-|
Module abstracting the peer map implementation.
{-| Module abstracting the peer map implementation.
This is abstracted separately since the speed of peermap updates can
be a significant part of the total runtime, and as such changing the
......@@ -46,8 +45,16 @@ import Data.Ord (comparing)
import Ganeti.HTools.Types
-- * Type definitions
-- | Our key type.
type Key = Ndx
-- | Our element type.
type Elem = Int
-- | The definition of a peer map.
type PeerMap = [(Key, Elem)]
-- * Initialization functions
......@@ -67,7 +74,7 @@ addWith fn k v lst =
Nothing -> insertBy pmCompare (k, v) lst
Just o -> insertBy pmCompare (k, fn o v) (remove k lst)
-- | Create a PeerMap from an association list, with possible duplicates
-- | Create a PeerMap from an association list, with possible duplicates.
accumArray :: (Elem -> Elem -> Elem) -- ^ function used to merge the elements
-> [(Key, Elem)] -- ^ source data
-> PeerMap -- ^ results
......@@ -76,15 +83,15 @@ accumArray fn ((k, v):xs) = addWith fn k v $ accumArray fn xs
-- * Basic operations
-- | Returns either the value for a key or zero if not found
-- | Returns either the value for a key, or zero if not found.
find :: Key -> PeerMap -> Elem
find key assoc = fromMaybe 0 (lookup key assoc)
-- | Add an element to a peermap, overwriting the previous value
-- | Add an element to a peermap, overwriting any previous value
-- for that key.
add :: Key -> Elem -> PeerMap -> PeerMap
add key val = addWith (\_ new -> new) key val
-- | Remove an element from a peermap
-- | Remove an element from a peermap.
remove :: Key -> PeerMap -> PeerMap
remove _ [] = []
remove k ((x@(x', _)):xs) = if k == x'
......
{-| Unittests for ganeti-htools
{-| Unittests for ganeti-htools.
-}
......@@ -70,15 +70,15 @@ run = flip quickCheckWithResult
-- * Constants
-- | Maximum memory (1TiB, somewhat random value)
-- | Maximum memory (1TiB, somewhat random value), used to bound
-- generated test data.
maxMem :: Int
maxMem = 1048576 -- i.e. 1024 * 1024
-- | Maximum disk (8TiB, somewhat random value)
-- | Maximum disk (8TiB, somewhat random value), used to bound
-- generated test data.
maxDsk :: Int
maxDsk = 8 * 1024 * 1024
-- | Max CPUs (1024, somewhat random value)
-- | Maximum number of CPUs (1024, somewhat random value), used to
-- bound generated test data.
maxCpu :: Int
maxCpu = 1024
......@@ -95,23 +95,23 @@ defGroupAssoc = Data.Map.singleton (Group.uuid defGroup) (Group.idx defGroup)
-- * Helper functions
-- | Simple checker for whether OpResult is fail or pass
-- | Simple checker for whether an 'OpResult' is a failure.
isFailure :: Types.OpResult a -> Bool
isFailure res =
  case res of
    Types.OpFail _ -> True
    _              -> False
-- | Update an instance to be smaller than a node
-- | Update an instance so that each of its resources is half of the
-- corresponding available resource of the given node.
setInstanceSmallerThanNode node inst =
  inst { Instance.mem   = Node.availMem node `div` 2
       , Instance.dsk   = Node.availDisk node `div` 2
       , Instance.vcpus = Node.availCpu node `div` 2
       }
-- | Create an instance given its spec
-- | Create an instance given its resource spec (memory, disk, vcpus).
createInstance memSize dskSize cpuCount =
  Instance.create "inst-unnamed" memSize dskSize cpuCount "running" [] True
                  (-1) (-1)
-- | Create a small cluster by repeating a node spec