Commit 70c708fc authored by Iustin Pop's avatar Iustin Pop
Browse files

Merge branch 'next'

* next:
  Update documentation for the iextags
  Re-wrap the README
  Configure exclusion tags via the cluster tags
  hail: add '-p' option intended for debugging use
  Read cluster tags in the IAllocator backend
  Read cluster tags in the LUXI backend
  Read cluster tags in the RAPI backend
  Introduce support for reading the cluster tags
  Collapse the statistical functions into one
  Specialize the math functions
  Use conflicting primaries count in cluster score
  Node: add function for conflicting primary count
  Add a new node list field
  Add a command-line option to filter exclusion tags
  Introduce tag-based exclusion of primary instances
  Add a tags attribute to instances
  Small change in some list arguments
  Use either \- or \(hy in manpages
parents 9739b6b8 73b2e389
......@@ -59,6 +59,7 @@ module Ganeti.HTools.CLI
, oDiskMoves
, oDynuFile
, oTieredSpec
, oExTags
, oShowVer
, oShowHelp
) where
......@@ -106,6 +107,7 @@ data Options = Options
, optMdsk :: Double -- ^ Max disk usage ratio for nodes
, optDiskMoves :: Bool -- ^ Allow disk moves
, optDynuFile :: Maybe FilePath -- ^ Optional file with dynamic use data
, optExTags :: Maybe [String] -- ^ Tags to use for exclusion
, optVerbose :: Int -- ^ Verbosity level
, optShowVer :: Bool -- ^ Just show the program version
, optShowHelp :: Bool -- ^ Just show the help
......@@ -138,6 +140,7 @@ defaultOptions = Options
, optMdsk = -1
, optDiskMoves = True
, optDynuFile = Nothing
, optExTags = Nothing
, optVerbose = 1
, optShowVer = False
, optShowHelp = False
......@@ -293,6 +296,11 @@ oDynuFile = Option "U" ["dynu-file"]
(ReqArg (\ f opts -> Ok opts { optDynuFile = Just f }) "FILE")
"Import dynamic utilisation data from the given FILE"
-- | Command-line option selecting the tag prefixes used for
-- primary-instance exclusion (comma-separated list).
oExTags :: OptType
oExTags =
    Option "" ["exclusion-tags"]
    (ReqArg (\arg opts ->
                 Ok opts { optExTags = Just (sepSplit ',' arg) })
     "TAG,...")
    "Enable instance exclusion based on given tag prefix"
oTieredSpec :: OptType
oTieredSpec = Option "" ["tiered-alloc"]
(ReqArg (\ inp opts -> do
......
......@@ -185,6 +185,7 @@ detailedCVNames = [ "free_mem_cv"
, "mem_load_cv"
, "disk_load_cv"
, "net_load_cv"
, "pri_tags_score"
]
-- | Compute the mem and disk covariance.
......@@ -225,9 +226,13 @@ compDetailedCV nl =
DynUtil c2 m2 d2 n2 = Node.utilPool n
in (c1/c2, m1/m2, d1/d2, n1/n2)
) nodes
-- metric: conflicting instance count
pri_tags_inst = sum $ map Node.conflictingPrimaries nodes
pri_tags_score = fromIntegral pri_tags_inst::Double
in [ mem_cv, dsk_cv, n1_score, res_cv, off_score, cpu_cv
, varianceCoeff c_load, varianceCoeff m_load
, varianceCoeff d_load, varianceCoeff n_load]
, varianceCoeff d_load, varianceCoeff n_load
, pri_tags_score ]
-- | Compute the /total/ variance.
compCV :: Node.List -> Double
......
......@@ -83,7 +83,7 @@ parseUtilisation line =
-- | External tool data loader from a variety of sources.
loadExternalData :: Options
-> IO (Node.List, Instance.List, String)
-> IO (Node.List, Instance.List, [String], String)
loadExternalData opts = do
(env_node, env_inst) <- parseEnv ()
let nodef = if optNodeSet opts then optNodeFile opts
......@@ -98,6 +98,10 @@ loadExternalData opts = do
setSim = isJust simdata
setFiles = optNodeSet opts || optInstSet opts
allSet = filter id [setRapi, setLuxi, setFiles]
exTags = case optExTags opts of
Nothing -> []
Just etl -> map (++ ":") etl
when (length allSet > 1) $
do
hPutStrLn stderr ("Error: Only one of the rapi, luxi, and data" ++
......@@ -126,8 +130,8 @@ loadExternalData opts = do
| setSim -> Simu.loadData $ fromJust simdata
| otherwise -> wrapIO $ Text.loadData nodef instf
let ldresult = input_data >>= Loader.mergeData util_data'
(loaded_nl, il, csf) <-
let ldresult = input_data >>= Loader.mergeData util_data' exTags
(loaded_nl, il, tags, csf) <-
(case ldresult of
Ok x -> return x
Bad s -> do
......@@ -140,4 +144,4 @@ loadExternalData opts = do
hPutStrLn stderr "Warning: cluster has inconsistent data:"
hPutStrLn stderr . unlines . map (printf " - %s") $ fix_msgs
return (fixed_nl, il, csf)
return (fixed_nl, il, tags, csf)
......@@ -52,8 +52,9 @@ parseBaseInstance n a = do
disk <- fromObj "disk_space_total" a
mem <- fromObj "memory" a
vcpus <- fromObj "vcpus" a
tags <- fromObj "tags" a
let running = "running"
return (n, Instance.create n mem disk vcpus running 0 0)
return (n, Instance.create n mem disk vcpus running tags 0 0)
-- | Parses an instance as found in the cluster instance listing.
parseInstance :: NameAssoc -- ^ The node name-to-index association list
......@@ -109,7 +110,9 @@ parseData body = do
iobj <- mapM (\(x,y) ->
asJSObject y >>= parseInstance ktn x . fromJSObject) idata
let (kti, il) = assignIndices iobj
(map_n, map_i, csf) <- mergeData [] (nl, il)
-- cluster tags
ctags <- fromObj "cluster_tags" obj
(map_n, map_i, ptags, csf) <- mergeData [] [] (nl, il, ctags)
req_nodes <- fromObj "required_nodes" request
optype <- fromObj "type" request
rqtype <-
......@@ -127,7 +130,7 @@ parseData body = do
ex_idex <- mapM (Container.findByName map_n) ex_nodes'
return $ Relocate ridx req_nodes (map Node.idx ex_idex)
other -> fail ("Invalid request type '" ++ other ++ "'")
return $ Request rqtype map_n map_i csf
return $ Request rqtype map_n map_i ptags csf
-- | Formats the response into a valid IAllocator response message.
formatResponse :: Bool -- ^ Whether the request was successful
......
......@@ -56,6 +56,7 @@ data Instance = Instance { name :: String -- ^ The instance name
, sNode :: T.Ndx -- ^ Original secondary node
, idx :: T.Idx -- ^ Internal index
, util :: T.DynUtil -- ^ Dynamic resource usage
, tags :: [String] -- ^ List of instance tags
} deriving (Show)
instance T.Element Instance where
......@@ -86,8 +87,9 @@ type List = Container.Container Instance
--
-- Some parameters are not initialized by function, and must be set
-- later (via 'setIdx' for example).
create :: String -> Int -> Int -> Int -> String -> T.Ndx -> T.Ndx -> Instance
create name_init mem_init dsk_init vcpus_init run_init pn sn =
create :: String -> Int -> Int -> Int -> String
-> [String] -> T.Ndx -> T.Ndx -> Instance
create name_init mem_init dsk_init vcpus_init run_init tags_init pn sn =
Instance { name = name_init
, mem = mem_init
, dsk = dsk_init
......@@ -98,6 +100,7 @@ create name_init mem_init dsk_init vcpus_init run_init pn sn =
, sNode = sn
, idx = -1
, util = T.baseUtil
, tags = tags_init
}
-- | Changes the index.
......
......@@ -48,6 +48,12 @@ import qualified Ganeti.HTools.Node as Node
import Ganeti.HTools.Types
-- * Constants
-- | The exclusion tag prefix.
--
-- Cluster tags starting with this prefix select the instance-tag
-- prefixes used for exclusion (see 'extractExTags').
exTagsPrefix :: String
exTagsPrefix = "htools:iextags:"
-- * Types
{-| The request type.
......@@ -63,7 +69,7 @@ data RqType
deriving (Show)
-- | A complete request, as received from Ganeti.
data Request = Request RqType Node.List Instance.List String
data Request = Request RqType Node.List Instance.List [String] String
deriving (Show)
-- * Functions
......@@ -114,6 +120,14 @@ fixNodes accu inst =
in (sdx, snew):ac3
else ac2
-- | Keep only those instance tags that match one of the selected
-- exclusion-tag prefixes, dropping all other tags.
filterExTags :: [String] -> Instance.Instance -> Instance.Instance
filterExTags prefixes inst =
    let selected = [ tag | tag <- Instance.tags inst
                   , any (`isPrefixOf` tag) prefixes ]
    in inst { Instance.tags = selected }
-- | Compute the longest common suffix of a list of strings that
-- starts with a dot.
longestDomain :: [String] -> String
......@@ -128,14 +142,20 @@ longestDomain (x:xs) =
-- | Drop the last @sflen@ characters from a name (used to strip the
-- cluster's common domain suffix); yields the empty string when
-- @sflen@ is at least the name's length.
stripSuffix :: Int -> String -> String
stripSuffix sflen = reverse . drop sflen . reverse
-- | Extracts the exclusion tags from the cluster configuration
extractExTags :: [String] -> [String]
extractExTags =
map (drop (length exTagsPrefix)) .
filter (isPrefixOf exTagsPrefix)
-- | Initializer function that loads the data from a node and instance
-- list and massages it into the correct format.
mergeData :: [(String, DynUtil)] -- ^ Instance utilisation data
-> (Node.AssocList,
Instance.AssocList) -- ^ Data from either Text.loadData
-- or Rapi.loadData
-> Result (Node.List, Instance.List, String)
mergeData um (nl, il) =
-> [String] -- ^ Exclusion tags
-> (Node.AssocList, Instance.AssocList, [String])
-- ^ Data from backends
-> Result (Node.List, Instance.List, [String], String)
mergeData um extags (nl, il, tags) =
let il2 = Container.fromAssocList il
il3 = foldl' (\im (name, n_util) ->
case Container.findByName im name of
......@@ -144,16 +164,18 @@ mergeData um (nl, il) =
let new_i = inst { Instance.util = n_util }
in Container.add (Instance.idx inst) new_i im
) il2 um
nl2 = foldl' fixNodes nl (Container.elems il3)
allextags = extags ++ extractExTags tags
il4 = Container.map (filterExTags allextags) il3
nl2 = foldl' fixNodes nl (Container.elems il4)
nl3 = Container.fromAssocList
(map (\ (k, v) -> (k, Node.buildPeers v il3)) nl2)
(map (\ (k, v) -> (k, Node.buildPeers v il4)) nl2)
node_names = map (Node.name . snd) nl
inst_names = map (Instance.name . snd) il
common_suffix = longestDomain (node_names ++ inst_names)
csl = length common_suffix
snl = Container.map (\n -> setName n (stripSuffix csl $ nameOf n)) nl3
sil = Container.map (\i -> setName i (stripSuffix csl $ nameOf i)) il3
in Ok (snl, sil, common_suffix)
sil = Container.map (\i -> setName i (stripSuffix csl $ nameOf i)) il4
in Ok (snl, sil, tags, common_suffix)
-- | Checks the cluster data for consistency.
checkData :: Node.List -> Instance.List
......
......@@ -38,7 +38,7 @@ import Ganeti.HTools.Loader
import Ganeti.HTools.Types
import qualified Ganeti.HTools.Node as Node
import qualified Ganeti.HTools.Instance as Instance
import Ganeti.HTools.Utils (fromJVal, annotateResult)
import Ganeti.HTools.Utils (fromJVal, annotateResult, tryFromObj, asJSObject)
-- * Utility functions
......@@ -70,11 +70,15 @@ queryInstancesMsg =
let nnames = JSArray []
fnames = ["name",
"disk_usage", "be/memory", "be/vcpus",
"status", "pnode", "snodes"]
"status", "pnode", "snodes", "tags"]
fields = JSArray $ map (JSString . toJSString) fnames
use_locking = JSBool False
in JSArray [nnames, fields, use_locking]
-- | The input data for the cluster-info query (the call takes no
-- arguments, hence the empty list).
queryClusterInfoMsg :: JSValue
queryClusterInfoMsg = JSArray []
-- | Wrapper over callMethod doing node query.
queryNodes :: L.Client -> IO (Result JSValue)
queryNodes = L.callMethod L.QueryNodes queryNodesMsg
......@@ -83,6 +87,9 @@ queryNodes = L.callMethod L.QueryNodes queryNodesMsg
-- | Wrapper over callMethod doing instance query.
queryInstances :: L.Client -> IO (Result JSValue)
queryInstances = L.callMethod L.QueryInstances queryInstancesMsg
-- | Wrapper over callMethod doing cluster-info query.
queryClusterInfo :: L.Client -> IO (Result JSValue)
queryClusterInfo = L.callMethod L.QueryClusterInfo queryClusterInfoMsg
-- | Parse a instance list in JSON format.
getInstances :: NameAssoc
-> JSValue
......@@ -93,7 +100,8 @@ getInstances ktn arr = toArray arr >>= mapM (parseInstance ktn)
parseInstance :: [(String, Ndx)]
-> JSValue
-> Result (String, Instance.Instance)
parseInstance ktn (JSArray (name:disk:mem:vcpus:status:pnode:snodes:[])) = do
parseInstance ktn (JSArray [ name, disk, mem, vcpus
, status, pnode, snodes, tags ]) = do
xname <- annotateResult "Parsing new instance" (fromJVal name)
let convert v = annotateResult ("Instance '" ++ xname ++ "'") (fromJVal v)
xdisk <- convert disk
......@@ -104,7 +112,9 @@ parseInstance ktn (JSArray (name:disk:mem:vcpus:status:pnode:snodes:[])) = do
snode <- (if null xsnodes then return Node.noSecondary
else lookupNode ktn xname (fromJSString $ head xsnodes))
xrunning <- convert status
let inst = Instance.create xname xmem xdisk xvcpus xrunning xpnode snode
xtags <- convert tags
let inst = Instance.create xname xmem xdisk xvcpus
xrunning xtags xpnode snode
return (xname, inst)
parseInstance _ v = fail ("Invalid instance query result: " ++ show v)
......@@ -115,8 +125,8 @@ getNodes arr = toArray arr >>= mapM parseNode
-- | Construct a node from a JSON object.
parseNode :: JSValue -> Result (String, Node.Node)
parseNode (JSArray
(name:mtotal:mnode:mfree:dtotal:dfree:ctotal:offline:drained:[]))
parseNode (JSArray [ name, mtotal, mnode, mfree, dtotal, dfree
, ctotal, offline, drained ])
= do
xname <- annotateResult "Parsing new node" (fromJVal name)
let convert v = annotateResult ("Node '" ++ xname ++ "'") (fromJVal v)
......@@ -137,11 +147,18 @@ parseNode (JSArray
parseNode v = fail ("Invalid node query result: " ++ show v)
-- | Parses the cluster tags out of a cluster-info query reply.
getClusterTags :: JSValue -> Result [String]
getClusterTags v = do
  let errmsg = "Parsing cluster info"
  obj <- annotateResult errmsg $ asJSObject v
  -- NOTE(review): the key used here is "tag", while the instance
  -- query above requests the plural "tags" field — confirm against
  -- the LUXI QueryClusterInfo reply schema.
  tryFromObj errmsg (fromJSObject obj) "tag"
-- * Main loader functionality
-- | Builds the cluster data from an URL.
loadData :: String -- ^ Unix socket to use as source
-> IO (Result (Node.AssocList, Instance.AssocList))
-> IO (Result (Node.AssocList, Instance.AssocList, [String]))
loadData master =
E.bracket
(L.getClient master)
......@@ -149,10 +166,12 @@ loadData master =
(\s -> do
nodes <- queryNodes s
instances <- queryInstances s
cinfo <- queryClusterInfo s
return $ do -- Result monad
node_data <- nodes >>= getNodes
let (node_names, node_idx) = assignIndices node_data
inst_data <- instances >>= getInstances node_names
let (_, inst_idx) = assignIndices inst_data
return (node_idx, inst_idx)
ctags <- cinfo >>= getClusterTags
return (node_idx, inst_idx, ctags)
)
......@@ -48,6 +48,7 @@ module Ganeti.HTools.Node
, addSec
-- * Stats
, availDisk
, conflictingPrimaries
-- * Formatting
, defaultFields
, showHeader
......@@ -59,6 +60,8 @@ module Ganeti.HTools.Node
) where
import Data.List
import qualified Data.Map as Map
import qualified Data.Foldable as Foldable
import Text.Printf (printf)
import qualified Ganeti.HTools.Container as Container
......@@ -69,6 +72,9 @@ import qualified Ganeti.HTools.Types as T
-- * Type declarations
-- | The tag map type
type TagMap = Map.Map String Int
-- | The node type.
data Node = Node
{ name :: String -- ^ The node name
......@@ -102,6 +108,7 @@ data Node = Node
-- computations
, utilPool :: T.DynUtil -- ^ Total utilisation capacity
, utilLoad :: T.DynUtil -- ^ Sum of instance utilisation
, pTags :: TagMap -- ^ Map of primary instance tags and their count
} deriving (Show)
instance T.Element Node where
......@@ -128,6 +135,39 @@ noLimit = -1
-- | No-limit sentinel for integer fields (the 'Int' counterpart of
-- 'noLimit', both being -1).
noLimitInt :: Int
noLimitInt = -1
-- * Helper functions
-- | Record one more occurrence of a tag in a tag map.
addTag :: TagMap -> String -> TagMap
addTag tmap tag = Map.insertWith (+) tag 1 tmap
-- | Fold a list of tags into a tag map, counting each occurrence.
addTags :: TagMap -> [String] -> TagMap
addTags tmap = foldl' addTag tmap
-- | Decrement a tag's count in a tag map, deleting the entry
-- entirely once the count would reach zero.
delTag :: TagMap -> String -> TagMap
delTag tmap tag = Map.update decrement tag tmap
    where decrement cnt
              | cnt > 1 = Just (cnt - 1)
              | otherwise = Nothing
-- | Remove one occurrence of each of the given tags from a tag map.
delTags :: TagMap -> [String] -> TagMap
delTags tmap = foldl' delTag tmap
-- | Whether adding the given tag list to the tag map must be
-- rejected, i.e. at least one of the tags is already present.
rejectAddTags :: TagMap -> [String] -> Bool
rejectAddTags tmap = any (\tag -> Map.member tag tmap)
-- | Check how many primary instances have conflicting tags. The
-- algorithm to compute this is to sum the count of all tags, then
-- subtract the size of the tag map (since each tag has at least one,
-- non-conflicting instance); this is equivalent to summing the
-- values in the tag map minus one.
conflictingPrimaries :: Node -> Int
conflictingPrimaries node =
    let tmap = pTags node
    in Foldable.sum tmap - Map.size tmap
-- * Initialization functions
-- | Create a new node.
......@@ -164,6 +204,7 @@ create name_init mem_t_init mem_n_init mem_f_init
, hiCpu = noLimitInt
, utilPool = T.baseUtil
, utilLoad = T.zeroUtil
, pTags = Map.empty
}
-- | Changes the index.
......@@ -215,12 +256,13 @@ buildPeers t il =
in t {peers=pmap, failN1 = new_failN1, rMem = new_rmem, pRem = new_prem}
-- | Assigns an instance to a node as primary and update the used VCPU
-- count and utilisation data.
-- count, utilisation data and tags map.
setPri :: Node -> Instance.Instance -> Node
setPri t inst = t { pList = Instance.idx inst:pList t
, uCpu = new_count
, pCpu = fromIntegral new_count / tCpu t
, utilLoad = utilLoad t `T.addUtil` Instance.util inst
, pTags = addTags (pTags t) (Instance.tags inst)
}
where new_count = uCpu t + Instance.vcpus inst
......@@ -256,7 +298,8 @@ removePri t inst =
new_load = utilLoad t `T.subUtil` Instance.util inst
in t { pList = new_plist, fMem = new_mem, fDsk = new_dsk
, failN1 = new_failn1, pMem = new_mp, pDsk = new_dp
, uCpu = new_ucpu, pCpu = new_rcpu, utilLoad = new_load }
, uCpu = new_ucpu, pCpu = new_rcpu, utilLoad = new_load
, pTags = delTags (pTags t) (Instance.tags inst) }
-- | Removes a secondary instance.
removeSec :: Node -> Instance.Instance -> Node
......@@ -295,16 +338,21 @@ addPri t inst =
new_dp = fromIntegral new_dsk / tDsk t
l_cpu = mCpu t
new_load = utilLoad t `T.addUtil` Instance.util inst
inst_tags = Instance.tags inst
old_tags = pTags t
in if new_mem <= 0 then T.OpFail T.FailMem
else if new_dsk <= 0 || mDsk t > new_dp then T.OpFail T.FailDisk
else if new_failn1 && not (failN1 t) then T.OpFail T.FailMem
else if l_cpu >= 0 && l_cpu < new_pcpu then T.OpFail T.FailCPU
else if rejectAddTags old_tags inst_tags
then T.OpFail T.FailTags
else
let new_plist = iname:pList t
new_mp = fromIntegral new_mem / tMem t
r = t { pList = new_plist, fMem = new_mem, fDsk = new_dsk
, failN1 = new_failn1, pMem = new_mp, pDsk = new_dp
, uCpu = new_ucpu, pCpu = new_pcpu, utilLoad = new_load }
, uCpu = new_ucpu, pCpu = new_pcpu, utilLoad = new_load
, pTags = addTags old_tags inst_tags }
in T.OpGood r
-- | Adds a secondary instance.
......@@ -374,6 +422,8 @@ showField t field =
"mload" -> printf "%5.3f" uM
"dload" -> printf "%5.3f" uD
"nload" -> printf "%5.3f" uN
"ptags" -> intercalate "," . map (\(k, v) -> printf "%s=%d" k v) .
Map.toList $ pTags t
_ -> printf "<unknown field>"
where
T.DynUtil { T.cpuWeight = uC, T.memWeight = uM,
......@@ -406,6 +456,7 @@ showHeader field =
"mload" -> ("lMem", True)
"dload" -> ("lDsk", True)
"nload" -> ("lNet", True)
"ptags" -> ("PrimaryTags", False)
_ -> ("<unknown field>", False)
-- | String converter for the node list functionality.
......
......@@ -33,7 +33,7 @@ import Network.Curl.Types ()
import Network.Curl.Code
import Data.List
import Control.Monad
import Text.JSON (JSObject, JSValue, fromJSObject)
import Text.JSON (JSObject, JSValue, fromJSObject, decodeStrict)
import Text.Printf (printf)
import Ganeti.HTools.Utils
......@@ -88,7 +88,8 @@ parseInstance ktn a = do
snode <- (if null snodes then return Node.noSecondary
else readEitherString (head snodes) >>= lookupNode ktn name)
running <- extract "status" a
let inst = Instance.create name mem disk vcpus running pnode snode
tags <- extract "tags" a
let inst = Instance.create name mem disk vcpus running tags pnode snode
return (name, inst)
-- | Construct a node from a JSON object.
......@@ -113,14 +114,16 @@ parseNode a = do
-- | Builds the cluster data from an URL.
loadData :: String -- ^ Cluster or URL to use as source
-> IO (Result (Node.AssocList, Instance.AssocList))
-> IO (Result (Node.AssocList, Instance.AssocList, [String]))
loadData master = do -- IO monad
let url = formatHost master
node_body <- getUrl $ printf "%s/2/nodes?bulk=1" url
inst_body <- getUrl $ printf "%s/2/instances?bulk=1" url
tags_body <- getUrl $ printf "%s/2/tags" url
return $ do -- Result monad
node_data <- node_body >>= getNodes
let (node_names, node_idx) = assignIndices node_data
inst_data <- inst_body >>= getInstances node_names
let (_, inst_idx) = assignIndices inst_data
return (node_idx, inst_idx)
tags_data <- tags_body >>= (fromJResult . decodeStrict)
return (node_idx, inst_idx, tags_data)
......@@ -52,7 +52,7 @@ parseDesc desc =
-- | Builds the cluster data from node\/instance files.
loadData :: String -- ^ Cluster description in text format
-> IO (Result (Node.AssocList, Instance.AssocList))
-> IO (Result (Node.AssocList, Instance.AssocList, [String]))
loadData ndata = -- IO monad, just for consistency with the other loaders
return $ do
(cnt, disk, mem, cpu) <- parseDesc ndata
......@@ -63,4 +63,4 @@ loadData ndata = -- IO monad, just for consistency with the other loaders
(fromIntegral cpu) False
in (idx, Node.setIdx n idx)
) [1..cnt]
return (nodes, [])
return (nodes, [], [])
......@@ -42,7 +42,7 @@ import qualified Ganeti.HTools.Instance as Instance
-- | Load a node from a field list.
loadNode :: (Monad m) => [String] -> m (String, Node.Node)
loadNode (name:tm:nm:fm:td:fd:tc:fo:[]) = do
loadNode [name, tm, nm, fm, td, fd, tc, fo] = do
new_node <-
if any (== "?") [tm,nm,fm,td,fd,tc] || fo == "Y" then
return $ Node.create name 0 0 0 0 0 0 True
......@@ -60,7 +60,7 @@ loadNode s = fail $ "Invalid/incomplete node data: '" ++ show s ++ "'"
-- | Load an instance from a field list.
loadInst :: (Monad m) =>
[(String, Ndx)] -> [String] -> m (String, Instance.Instance)
loadInst ktn (name:mem:dsk:vcpus:status:pnode:snode:[]) = do
loadInst ktn [name, mem, dsk, vcpus, status, pnode, snode, tags] = do
pidx <- lookupNode ktn name pnode
sidx <- (if null snode then return Node.noSecondary
else lookupNode ktn name snode)
......@@ -69,7 +69,8 @@ loadInst ktn (name:mem:dsk:vcpus:status:pnode:snode:[]) = do
vvcpus <- tryRead name vcpus
when (sidx == pidx) $ fail $ "Instance " ++ name ++
" has same primary and secondary node - " ++ pnode
let newinst = Instance.create name vmem vdsk vvcpus status pidx sidx
let vtags = sepSplit ',' tags
newinst = Instance.create name vmem vdsk vvcpus status vtags pidx sidx
return (name, newinst)
loadInst _ s = fail $ "Invalid/incomplete instance data: '" ++ show s ++ "'"
......@@ -90,7 +91,7 @@ loadTabular text_data convert_fn = do
-- | Builds the cluster data from node\/instance files.
loadData :: String -- ^ Node data in string format
-> String -- ^ Instance data in string format
-> IO (Result (Node.AssocList, Instance.AssocList))
-> IO (Result (Node.AssocList, Instance.AssocList, [String]))
loadData nfile ifile = do -- IO monad
ndata <- readFile nfile
idata <- readFile ifile
......@@ -99,4 +100,4 @@ loadData nfile ifile = do -- IO monad
(ktn, nl) <- loadTabular ndata loadNode
{- instance file: name mem disk vcpus status pnode snode tags -}
(_, il) <- loadTabular idata (loadInst ktn)
return (nl, il)
return (nl, il, [])
......@@ -147,6 +147,7 @@ data FailMode = FailMem -- ^ Failed due to not enough RAM
| FailDisk -- ^ Failed due to not enough disk
| FailCPU -- ^ Failed due to not enough CPU capacity
| FailN1 -- ^ Failed due to not passing N1 checks
| FailTags -- ^ Failed due to tag exclusion
deriving (Eq, Enum, Bounded, Show)
-- | List with failure statistics
......
......@@ -78,23 +78,18 @@ fst3 (a, _, _) = a
-- * Mathematical functions
-- Simple and slow statistical functions, please replace with better versions
-- | Mean value of a list.
meanValue :: Floating a => [a] -> a
meanValue lst = sum lst / fromIntegral (length lst)
-- | Standard deviation.
stdDev :: Floating a => [a] -> a
stdDev lst =
let mv = meanValue lst
-- Simple and slow statistical functions, please replace with better
-- versions
-- | The variance coefficient of the list.
varianceCoeff :: [Double] -> Double
varianceCoeff lst =
let ll = fromIntegral (length lst)::Double -- length of list
mv = sum lst / ll -- mean value
av = foldl' (\accu em -> let d = em - mv in accu + d * d) 0.0 lst