diff --git a/htools/Ganeti/HTools/CLI.hs b/htools/Ganeti/HTools/CLI.hs
index 33cacd94148db5fd31d4fe730ba74f714e4391d5..dd0d3923aaa8cacf12bb6b984699160597087ab1 100644
--- a/htools/Ganeti/HTools/CLI.hs
+++ b/htools/Ganeti/HTools/CLI.hs
@@ -1,8 +1,8 @@
 {-| Implementation of command-line functions.
 
-This module holds the common cli-related functions for the binaries,
-separated into this module since Utils.hs is used in many other places
-and this is more IO oriented.
+This module holds the common command-line related functions for the
+binaries, separated into this module since "Ganeti.HTools.Utils" is
+used in many other places and this is more IO oriented.
 
 -}
 
@@ -89,10 +89,16 @@ import qualified Ganeti.Constants as C
 import Ganeti.HTools.Types
 import Ganeti.HTools.Utils
 
--- | The default value for the luxi socket
+-- * Constants
+
+-- | The default value for the luxi socket.
+--
+-- This is re-exported from the "Ganeti.Constants" module.
 defaultLuxiSocket :: FilePath
 defaultLuxiSocket = C.masterSocket
 
+-- * Data types
+
 -- | Command line options structure.
 data Options = Options
     { optDataFile    :: Maybe FilePath -- ^ Path to the cluster data file
@@ -170,9 +176,11 @@ defaultOptions  = Options
  , optVerbose     = 1
  }
 
--- | Abrreviation for the option type
+-- | Abbreviation for the option type.
 type OptType = OptDescr (Options -> Result Options)
 
+-- * Command line options
+
 oDataFile :: OptType
 oDataFile = Option "t" ["text-data"]
             (ReqArg (\ f o -> Ok o { optDataFile = Just f }) "FILE")
@@ -394,13 +402,15 @@ oVerbose = Option "v" ["verbose"]
            (NoArg (\ opts -> Ok opts { optVerbose = optVerbose opts + 1 }))
            "increase the verbosity level"
 
--- | Usage info
+-- * Functions
+
+-- | Usage info.
 usageHelp :: String -> [OptType] -> String
 usageHelp progname =
     usageInfo (printf "%s %s\nUsage: %s [OPTION...]"
                progname Version.version progname)
 
--- | Command line parser, using the 'options' structure.
+-- | Command line parser, using the 'Options' structure.
 parseOpts :: [String]               -- ^ The command line arguments
           -> String                 -- ^ The program name
           -> [OptType]              -- ^ The supported command line options
diff --git a/htools/Ganeti/HTools/Cluster.hs b/htools/Ganeti/HTools/Cluster.hs
index c2b79089f4e73574bda5f60b7a6982982004f1e3..95178e46a72ca7bbde83ad0100c2c48b8804eb5a 100644
--- a/htools/Ganeti/HTools/Cluster.hs
+++ b/htools/Ganeti/HTools/Cluster.hs
@@ -1,7 +1,7 @@
 {-| Implementation of cluster-wide logic.
 
 This module holds all pure cluster-logic; I\/O related functionality
-goes into the "Main" module for the individual binaries.
+goes into the /Main/ module for the individual binaries.
 
 -}
 
@@ -106,17 +106,18 @@ type AllocResult = (FailStats, Node.List, Instance.List,
 
 
 -- | A type denoting the valid allocation mode/pairs.
+--
 -- For a one-node allocation, this will be a @Left ['Node.Node']@,
 -- whereas for a two-node allocation, this will be a @Right
 -- [('Node.Node', 'Node.Node')]@.
 type AllocNodes = Either [Ndx] [(Ndx, Ndx)]
 
--- | The empty solution we start with when computing allocations
+-- | The empty solution we start with when computing allocations.
 emptySolution :: AllocSolution
 emptySolution = AllocSolution { asFailures = [], asAllocs = 0
                               , asSolutions = [], asLog = [] }
 
--- | The complete state for the balancing solution
+-- | The complete state for the balancing solution.
 data Table = Table Node.List Instance.List Score [Placement]
              deriving (Show, Read)
 
@@ -144,7 +145,7 @@ data CStats = CStats { csFmem :: Integer -- ^ Cluster free mem
                      }
             deriving (Show, Read)
 
--- | Currently used, possibly to allocate, unallocable
+-- | Currently used, possibly to allocate, unallocable.
 type AllocStats = (RSpec, RSpec, RSpec)
 
 -- * Utility functions
@@ -170,11 +171,11 @@ computeBadItems nl il =
   in
     (bad_nodes, bad_instances)
 
--- | Zero-initializer for the CStats type
+-- | Zero-initializer for the CStats type.
 emptyCStats :: CStats
 emptyCStats = CStats 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 
--- | Update stats with data from a new node
+-- | Update stats with data from a new node.
 updateCStats :: CStats -> Node.Node -> CStats
 updateCStats cs node =
     let CStats { csFmem = x_fmem, csFdsk = x_fdsk,
@@ -243,7 +244,7 @@ computeAllocationDelta cini cfin =
                (truncate t_dsk - fromIntegral f_idsk)
     in (rini, rfin, runa)
 
--- | The names and weights of the individual elements in the CV list
+-- | The names and weights of the individual elements in the CV list.
 detailedCVInfo :: [(Double, String)]
 detailedCVInfo = [ (1,  "free_mem_cv")
                  , (1,  "free_disk_cv")
@@ -311,11 +312,11 @@ compDetailedCV nl =
 compCV :: Node.List -> Double
 compCV = sum . zipWith (*) detailedCVWeights . compDetailedCV
 
--- | Compute online nodes from a Node.List
+-- | Compute online nodes from a 'Node.List'.
 getOnline :: Node.List -> [Node.Node]
 getOnline = filter (not . Node.offline) . Container.elems
 
--- * hbal functions
+-- * Balancing functions
 
 -- | Compute best table. Note that the ordering of the arguments is important.
 compareTables :: Table -> Table -> Table
@@ -534,7 +535,7 @@ checkMove nodes_idx disk_moves inst_moves ini_tbl victims =
        then ini_tbl -- no advancement
        else best_tbl
 
--- | Check if we are allowed to go deeper in the balancing
+-- | Check if we are allowed to go deeper in the balancing.
 doNextBalance :: Table     -- ^ The starting table
               -> Int       -- ^ Remaining length
               -> Score     -- ^ Score at which to stop
@@ -544,7 +545,7 @@ doNextBalance ini_tbl max_rounds min_score =
         ini_plc_len = length ini_plc
     in (max_rounds < 0 || ini_plc_len < max_rounds) && ini_cv > min_score
 
--- | Run a balance move
+-- | Run a balance move.
 tryBalance :: Table       -- ^ The starting table
            -> Bool        -- ^ Allow disk moves
            -> Bool        -- ^ Allow instance moves
@@ -574,13 +575,13 @@ tryBalance ini_tbl disk_moves inst_moves evac_mode mg_limit min_gain =
 
 -- * Allocation functions
 
--- | Build failure stats out of a list of failures
+-- | Build failure stats out of a list of failures.
 collapseFailures :: [FailMode] -> FailStats
 collapseFailures flst =
     map (\k -> (k, length $ filter (k ==) flst)) [minBound..maxBound]
 
 -- | Update current Allocation solution and failure stats with new
--- elements
+-- elements.
 concatAllocs :: AllocSolution -> OpResult Node.AllocElement -> AllocSolution
 concatAllocs as (OpFail reason) = as { asFailures = reason : asFailures as }
 
@@ -611,7 +612,7 @@ sumAllocs :: AllocSolution -> AllocSolution -> AllocSolution
 sumAllocs (AllocSolution af aa as al) (AllocSolution bf ba bs bl) =
     AllocSolution (af ++ bf) (aa + ba) (as ++ bs) (al ++ bl)
 
--- | Given a solution, generates a reasonable description for it
+-- | Given a solution, generates a reasonable description for it.
 describeSolution :: AllocSolution -> String
 describeSolution as =
   let fcnt = asFailures as
@@ -629,7 +630,7 @@ describeSolution as =
                      " for node(s) %s") cv (asAllocs as) (length fcnt) freasons
              (intercalate "/" . map Node.name $ nodes)
 
--- | Annotates a solution with the appropriate string
+-- | Annotates a solution with the appropriate string.
 annotateSolution :: AllocSolution -> AllocSolution
 annotateSolution as = as { asLog = describeSolution as : asLog as }
 
@@ -678,7 +679,7 @@ tryAlloc nl _ inst (Left all_nodes) =
        then fail "No online nodes"
        else return $ annotateSolution sols
 
--- | Given a group/result, describe it as a nice (list of) messages
+-- | Given a group/result, describe it as a nice (list of) messages.
 solutionDescription :: Group.List -> (Gdx, Result AllocSolution) -> [String]
 solutionDescription gl (groupId, result) =
   case result of
@@ -690,7 +691,7 @@ solutionDescription gl (groupId, result) =
 
 -- | From a list of possibly bad and possibly empty solutions, filter
 -- only the groups with a valid result. Note that the result will be
--- reversed compared to the original list
+-- reversed compared to the original list.
 filterMGResults :: Group.List
                 -> [(Gdx, Result AllocSolution)]
                 -> [(Gdx, AllocSolution)]
@@ -703,7 +704,7 @@ filterMGResults gl = foldl' fn []
                        | unallocable gdx -> accu
                        | otherwise -> (gdx, sol):accu
 
--- | Sort multigroup results based on policy and score
+-- | Sort multigroup results based on policy and score.
 sortMGResults :: Group.List
              -> [(Gdx, AllocSolution)]
              -> [(Gdx, AllocSolution)]
@@ -782,7 +783,7 @@ tryMGReloc _ mgnl mgil xid ncount ex_ndx = do
                 Just v -> return v
   tryReloc nl il xid ncount ex_ndx
 
--- | Change an instance's secondary node
+-- | Change an instance's secondary node.
 evacInstance :: (Monad m) =>
                 [Ndx]                      -- ^ Excluded nodes
              -> Instance.List              -- ^ The current instance list
@@ -854,7 +855,7 @@ tryMGEvac _ nl il ex_ndx =
       let sol = foldl' sumAllocs emptySolution results
       return $ annotateSolution sol
 
--- | Recursively place instances on the cluster until we're out of space
+-- | Recursively place instances on the cluster until we're out of space.
 iterateAlloc :: Node.List
              -> Instance.List
              -> Instance.Instance
@@ -879,7 +880,7 @@ iterateAlloc nl il newinst allocnodes ixes cstats =
                  _ -> Bad "Internal error: multiple solutions for single\
                           \ allocation"
 
--- | The core of the tiered allocation mode
+-- | The core of the tiered allocation mode.
 tieredAlloc :: Node.List
             -> Instance.List
             -> Instance.Instance
@@ -1072,7 +1073,7 @@ iMoveToJob nl il idx move =
 
 -- * Node group functions
 
--- | Computes the group of an instance
+-- | Computes the group of an instance.
 instanceGroup :: Node.List -> Instance.Instance -> Result Gdx
 instanceGroup nl i =
   let sidx = Instance.sNode i
@@ -1087,19 +1088,19 @@ instanceGroup nl i =
                 show pgroup ++ ", secondary " ++ show sgroup)
      else return pgroup
 
--- | Computes the group of an instance per the primary node
+-- | Computes the group of an instance per the primary node.
 instancePriGroup :: Node.List -> Instance.Instance -> Gdx
 instancePriGroup nl i =
   let pnode = Container.find (Instance.pNode i) nl
   in  Node.group pnode
 
 -- | Compute the list of badly allocated instances (split across node
--- groups)
+-- groups).
 findSplitInstances :: Node.List -> Instance.List -> [Instance.Instance]
 findSplitInstances nl =
   filter (not . isOk . instanceGroup nl) . Container.elems
 
--- | Splits a cluster into the component node groups
+-- | Splits a cluster into the component node groups.
 splitCluster :: Node.List -> Instance.List ->
                 [(Gdx, (Node.List, Instance.List))]
 splitCluster nl il =
diff --git a/htools/Ganeti/HTools/ExtLoader.hs b/htools/Ganeti/HTools/ExtLoader.hs
index b7ca04afdaca1da38bd056fdc1bd2811b7578ce0..74f68381d6b960dca1420b1417dc97eacc8d5605 100644
--- a/htools/Ganeti/HTools/ExtLoader.hs
+++ b/htools/Ganeti/HTools/ExtLoader.hs
@@ -1,4 +1,4 @@
-{-| External data loader
+{-| External data loader.
 
 This module holds the external data loading, and thus is the only one
 depending (via the specialized Text\/Rapi\/Luxi modules) on the actual
@@ -51,7 +51,7 @@ import Ganeti.HTools.Types
 import Ganeti.HTools.CLI
 import Ganeti.HTools.Utils (sepSplit, tryRead)
 
--- | Error beautifier
+-- | Error beautifier.
 wrapIO :: IO (Result a) -> IO (Result a)
 wrapIO = flip catch (return . Bad . show)
 
diff --git a/htools/Ganeti/HTools/IAlloc.hs b/htools/Ganeti/HTools/IAlloc.hs
index 7012173d526a506874f6a2db4c32aa01b6eafc3b..8801949f552f88e22e4c277d7a0327f31bcad590 100644
--- a/htools/Ganeti/HTools/IAlloc.hs
+++ b/htools/Ganeti/HTools/IAlloc.hs
@@ -60,7 +60,7 @@ parseBaseInstance n a = do
   let running = "running"
   return (n, Instance.create n mem disk vcpus running tags True 0 0)
 
--- | Parses an instance as found in the cluster instance listg.
+-- | Parses an instance as found in the cluster instance list.
 parseInstance :: NameAssoc -- ^ The node name-to-index association list
               -> String    -- ^ The name of the instance
               -> JSRecord  -- ^ The JSON object
diff --git a/htools/Ganeti/HTools/Instance.hs b/htools/Ganeti/HTools/Instance.hs
index 759f62255948c0b418f2e1740303236455ee125c..e21c30ef815b2eeb0e453f1b274bd40adaff43ce 100644
--- a/htools/Ganeti/HTools/Instance.hs
+++ b/htools/Ganeti/HTools/Instance.hs
@@ -49,7 +49,7 @@ import qualified Ganeti.Constants as C
 
 -- * Type declarations
 
--- | The instance type
+-- | The instance type.
 data Instance = Instance
     { name         :: String    -- ^ The instance name
     , alias        :: String    -- ^ The shortened name
@@ -74,7 +74,7 @@ instance T.Element Instance where
     setIdx   = setIdx
     allNames n = [name n, alias n]
 
--- | Running instance states.
+-- | Constant holding the running instance states.
 runningStates :: [String]
 runningStates = [C.inststRunning, C.inststErrorup]
 
diff --git a/htools/Ganeti/HTools/Loader.hs b/htools/Ganeti/HTools/Loader.hs
index 7f8f551b9749d3ff6c042dc6140b64df4789382d..89b479d8e75c3d53ad9022a80abc8754c0093d39 100644
--- a/htools/Ganeti/HTools/Loader.hs
+++ b/htools/Ganeti/HTools/Loader.hs
@@ -1,4 +1,4 @@
-{-| Generic data loader
+{-| Generic data loader.
 
 This module holds the common code for parsing the input data after it
 has been loaded from external sources.
@@ -54,7 +54,7 @@ import Ganeti.HTools.Types
 
 -- * Constants
 
--- | The exclusion tag prefix
+-- | The exclusion tag prefix.
 exTagsPrefix :: String
 exTagsPrefix = "htools:iextags:"
 
@@ -147,7 +147,7 @@ fixNodes accu inst =
            in Container.add sdx snew ac2
       else ac2
 
--- | Remove non-selected tags from the exclusion list
+-- | Remove non-selected tags from the exclusion list.
 filterExTags :: [String] -> Instance.Instance -> Instance.Instance
 filterExTags tl inst =
     let old_tags = Instance.tags inst
@@ -155,7 +155,7 @@ filterExTags tl inst =
                    old_tags
     in inst { Instance.tags = new_tags }
 
--- | Update the movable attribute
+-- | Update the movable attribute.
 updateMovable :: [String]           -- ^ Selected instances (if not empty)
               -> [String]           -- ^ Excluded instances
               -> Instance.Instance  -- ^ Target Instance
@@ -168,7 +168,7 @@ updateMovable selinsts exinsts inst =
     else inst
 
 -- | Compute the longest common suffix of a list of strings that
--- | starts with a dot.
+-- starts with a dot.
 longestDomain :: [String] -> String
 longestDomain [] = ""
 longestDomain (x:xs) =
@@ -177,13 +177,13 @@ longestDomain (x:xs) =
                               else accu)
       "" $ filter (isPrefixOf ".") (tails x)
 
--- | Extracts the exclusion tags from the cluster configuration
+-- | Extracts the exclusion tags from the cluster configuration.
 extractExTags :: [String] -> [String]
 extractExTags =
     map (drop (length exTagsPrefix)) .
     filter (isPrefixOf exTagsPrefix)
 
--- | Extracts the common suffix from node\/instance names
+-- | Extracts the common suffix from node\/instance names.
 commonSuffix :: Node.List -> Instance.List -> String
 commonSuffix nl il =
     let node_names = map Node.name $ Container.elems nl
diff --git a/htools/Ganeti/HTools/Luxi.hs b/htools/Ganeti/HTools/Luxi.hs
index 28128fc03ef4187cefb935cbe8340d3284c9861c..1fd3bf3df7ddfa7ed0f9f087365f7c2e3c634f3a 100644
--- a/htools/Ganeti/HTools/Luxi.hs
+++ b/htools/Ganeti/HTools/Luxi.hs
@@ -187,7 +187,7 @@ parseGroup v = fail ("Invalid group query result: " ++ show v)
 
 -- * Main loader functionality
 
--- | Builds the cluster data from an URL.
+-- | Builds the cluster data by querying a given socket name.
 readData :: String -- ^ Unix socket to use as source
          -> IO (Result JSValue, Result JSValue, Result JSValue, Result JSValue)
 readData master =
@@ -202,6 +202,8 @@ readData master =
           return (groups, nodes, instances, cinfo)
        )
 
+-- | Converts the output of 'readData' into the internal cluster
+-- representation.
 parseData :: (Result JSValue, Result JSValue, Result JSValue, Result JSValue)
           -> Result ClusterData
 parseData (groups, nodes, instances, cinfo) = do
@@ -214,7 +216,7 @@ parseData (groups, nodes, instances, cinfo) = do
   ctags <- cinfo >>= getClusterTags
   return (ClusterData group_idx node_idx inst_idx ctags)
 
--- | Top level function for data loading
+-- | Top level function for data loading.
 loadData :: String -- ^ Unix socket to use as source
          -> IO (Result ClusterData)
 loadData = fmap parseData . readData
diff --git a/htools/Ganeti/HTools/Node.hs b/htools/Ganeti/HTools/Node.hs
index 65237e5a24ea1b4a0be97578bbc6e6bcd11582a0..5394904ad500b9763eeaa42104b6e85c3abd8624 100644
--- a/htools/Ganeti/HTools/Node.hs
+++ b/htools/Ganeti/HTools/Node.hs
@@ -85,7 +85,7 @@ import qualified Ganeti.HTools.Types as T
 
 -- * Type declarations
 
--- | The tag map type
+-- | The tag map type.
 type TagMap = Map.Map String Int
 
 -- | The node type.
@@ -140,7 +140,7 @@ type AssocList = [(T.Ndx, Node)]
 type List = Container.Container Node
 
 -- | A simple name for an allocation element (here just for logistic
--- reasons)
+-- reasons).
 type AllocElement = (List, Instance.Instance, [Node], T.Score)
 
 -- | Constant node index for a non-moveable instance.
@@ -149,26 +149,26 @@ noSecondary = -1
 
 -- * Helper functions
 
--- | Add a tag to a tagmap
+-- | Add a tag to a tagmap.
 addTag :: TagMap -> String -> TagMap
 addTag t s = Map.insertWith (+) s 1 t
 
--- | Add multiple tags
+-- | Add multiple tags.
 addTags :: TagMap -> [String] -> TagMap
 addTags = foldl' addTag
 
--- | Adjust or delete a tag from a tagmap
+-- | Adjust or delete a tag from a tagmap.
 delTag :: TagMap -> String -> TagMap
 delTag t s = Map.update (\v -> if v > 1
                                then Just (v-1)
                                else Nothing)
              s t
 
--- | Remove multiple tags
+-- | Remove multiple tags.
 delTags :: TagMap -> [String] -> TagMap
 delTags = foldl' delTag
 
--- | Check if we can add a list of tags to a tagmap
+-- | Check if we can add a list of tags to a tagmap.
 rejectAddTags :: TagMap -> [String] -> Bool
 rejectAddTags t = any (`Map.member` t)
 
@@ -221,11 +221,11 @@ create name_init mem_t_init mem_n_init mem_f_init
          , group = group_init
          }
 
--- | Conversion formula from mDsk\/tDsk to loDsk
+-- | Conversion formula from mDsk\/tDsk to loDsk.
 mDskToloDsk :: Double -> Double -> Int
 mDskToloDsk mval tdsk = floor (mval * tdsk)
 
--- | Conversion formula from mCpu\/tCpu to hiCpu
+-- | Conversion formula from mCpu\/tCpu to hiCpu.
 mCpuTohiCpu :: Double -> Double -> Int
 mCpuTohiCpu mval tcpu = floor (mval * tcpu)
 
@@ -249,11 +249,11 @@ setOffline t val = t { offline = val }
 setXmem :: Node -> Int -> Node
 setXmem t val = t { xMem = val }
 
--- | Sets the max disk usage ratio
+-- | Sets the max disk usage ratio.
 setMdsk :: Node -> Double -> Node
 setMdsk t val = t { mDsk = val, loDsk = mDskToloDsk val (tDsk t) }
 
--- | Sets the max cpu usage ratio
+-- | Sets the max cpu usage ratio.
 setMcpu :: Node -> Double -> Node
 setMcpu t val = t { mCpu = val, hiCpu = mCpuTohiCpu val (tCpu t) }
 
@@ -435,7 +435,7 @@ addSecEx force t inst pdx =
 
 -- * Stats functions
 
--- | Computes the amount of available disk on a given node
+-- | Computes the amount of available disk on a given node.
 availDisk :: Node -> Int
 availDisk t =
     let _f = fDsk t
@@ -444,11 +444,11 @@ availDisk t =
        then 0
        else _f - _l
 
--- | Computes the amount of used disk on a given node
+-- | Computes the amount of used disk on a given node.
 iDsk :: Node -> Int
 iDsk t = truncate (tDsk t) - fDsk t
 
--- | Computes the amount of available memory on a given node
+-- | Computes the amount of available memory on a given node.
 availMem :: Node -> Int
 availMem t =
     let _f = fMem t
@@ -457,7 +457,7 @@ availMem t =
        then 0
        else _f - _l
 
--- | Computes the amount of available memory on a given node
+-- | Computes the amount of available CPU on a given node.
 availCpu :: Node -> Int
 availCpu t =
     let _u = uCpu t
@@ -472,7 +472,10 @@ iMem t = truncate (tMem t) - nMem t - xMem t - fMem t
 
 -- * Display functions
 
-showField :: Node -> String -> String
+-- | Return a field for a given node.
+showField :: Node   -- ^ Node which we're querying
+          -> String -- ^ Field name
+          -> String -- ^ Field value as string
 showField t field =
     case field of
       "idx"  -> printf "%4d" $ idx t
@@ -512,7 +515,7 @@ showField t field =
       T.DynUtil { T.cpuWeight = uC, T.memWeight = uM,
                   T.dskWeight = uD, T.netWeight = uN } = utilLoad t
 
--- | Returns the header and numeric propery of a field
+-- | Returns the header and numeric property of a field.
 showHeader :: String -> (String, Bool)
 showHeader field =
     case field of
@@ -552,6 +555,7 @@ list :: [String] -> Node -> [String]
 list fields t = map (showField t) fields
 
 
+-- | Constant holding the fields we're displaying by default.
 defaultFields :: [String]
 defaultFields =
     [ "status", "name", "tmem", "nmem", "imem", "xmem", "fmem"
@@ -560,7 +564,7 @@ defaultFields =
     , "cload", "mload", "dload", "nload" ]
 
 -- | Split a list of nodes into a list of (node group UUID, list of
--- associated nodes)
+-- associated nodes).
 computeGroups :: [Node] -> [(T.Gdx, [Node])]
 computeGroups nodes =
   let nodes' = sortBy (comparing group) nodes
diff --git a/htools/Ganeti/HTools/PeerMap.hs b/htools/Ganeti/HTools/PeerMap.hs
index d7c64405756db4c7a089fb07ead7df92f8f9e07d..2d17d2ace313dfbdfe11eca31982048d86a85bab 100644
--- a/htools/Ganeti/HTools/PeerMap.hs
+++ b/htools/Ganeti/HTools/PeerMap.hs
@@ -1,5 +1,4 @@
-{-|
-  Module abstracting the peer map implementation.
+{-| Module abstracting the peer map implementation.
 
 This is abstracted separately since the speed of peermap updates can
 be a significant part of the total runtime, and as such changing the
@@ -46,8 +45,15 @@ import Data.Ord (comparing)
 
 import Ganeti.HTools.Types
 
+-- * Type definitions
+
+-- | Our key type.
 type Key = Ndx
+
+-- | Our element type.
 type Elem = Int
+
+-- | The definition of a peer map.
 type PeerMap = [(Key, Elem)]
 
 -- * Initialization functions
@@ -67,7 +74,7 @@ addWith fn k v lst =
       Nothing -> insertBy pmCompare (k, v) lst
       Just o -> insertBy pmCompare (k, fn o v) (remove k lst)
 
--- | Create a PeerMap from an association list, with possible duplicates
+-- | Create a PeerMap from an association list, with possible duplicates.
 accumArray :: (Elem -> Elem -> Elem) -- ^ function used to merge the elements
               -> [(Key, Elem)]       -- ^ source data
               -> PeerMap             -- ^ results
@@ -76,15 +83,15 @@ accumArray fn ((k, v):xs) = addWith fn k v $ accumArray fn xs
 
 -- * Basic operations
 
--- | Returns either the value for a key or zero if not found
+-- | Returns either the value for a key or zero if not found.
 find :: Key -> PeerMap -> Elem
 find k = fromMaybe 0 . lookup k
 
--- | Add an element to a peermap, overwriting the previous value
+-- | Add an element to a peermap, overwriting the previous value.
 add :: Key -> Elem -> PeerMap -> PeerMap
 add = addWith (flip const)
 
--- | Remove an element from a peermap
+-- | Remove an element from a peermap.
 remove :: Key -> PeerMap -> PeerMap
 remove _ [] = []
 remove k ((x@(x', _)):xs) = if k == x'
diff --git a/htools/Ganeti/HTools/QC.hs b/htools/Ganeti/HTools/QC.hs
index 475144716c0ba6fd07d038e2fd90cfd10a22e51e..df3051bc26e8a760755da9bfcfbe0ac52ec8caa4 100644
--- a/htools/Ganeti/HTools/QC.hs
+++ b/htools/Ganeti/HTools/QC.hs
@@ -1,4 +1,4 @@
-{-| Unittests for ganeti-htools
+{-| Unittests for ganeti-htools.
 
 -}
 
@@ -70,15 +70,15 @@ run = flip quickCheckWithResult
 
 -- * Constants
 
--- | Maximum memory (1TiB, somewhat random value)
+-- | Maximum memory (1TiB, somewhat random value).
 maxMem :: Int
 maxMem = 1024 * 1024
 
--- | Maximum disk (8TiB, somewhat random value)
+-- | Maximum disk (8TiB, somewhat random value).
 maxDsk :: Int
 maxDsk = 1024 * 1024 * 8
 
--- | Max CPUs (1024, somewhat random value)
+-- | Max CPUs (1024, somewhat random value).
 maxCpu :: Int
 maxCpu = 1024
 
@@ -95,23 +95,23 @@ defGroupAssoc = Data.Map.singleton (Group.uuid defGroup) (Group.idx defGroup)
 
 -- * Helper functions
 
--- | Simple checker for whether OpResult is fail or pass
+-- | Simple checker for whether OpResult is fail or pass.
 isFailure :: Types.OpResult a -> Bool
 isFailure (Types.OpFail _) = True
 isFailure _ = False
 
--- | Update an instance to be smaller than a node
+-- | Update an instance to be smaller than a node.
 setInstanceSmallerThanNode node inst =
     inst { Instance.mem = Node.availMem node `div` 2
          , Instance.dsk = Node.availDisk node `div` 2
          , Instance.vcpus = Node.availCpu node `div` 2
          }
 
--- | Create an instance given its spec
+-- | Create an instance given its spec.
 createInstance mem dsk vcpus =
     Instance.create "inst-unnamed" mem dsk vcpus "running" [] True (-1) (-1)
 
--- | Create a small cluster by repeating a node spec
+-- | Create a small cluster by repeating a node spec.
 makeSmallCluster :: Node.Node -> Int -> Node.List
 makeSmallCluster node count =
     let fn = Node.buildPeers node Container.empty
@@ -119,7 +119,7 @@ makeSmallCluster node count =
         (_, nlst) = Loader.assignIndices namelst
     in nlst
 
--- | Checks if a node is "big" enough
+-- | Checks if a node is "big" enough.
 isNodeBig :: Node.Node -> Int -> Bool
 isNodeBig node size = Node.availDisk node > size * Types.unitDsk
                       && Node.availMem node > size * Types.unitMem
@@ -129,7 +129,7 @@ canBalance :: Cluster.Table -> Bool -> Bool -> Bool -> Bool
 canBalance tbl dm im evac = isJust $ Cluster.tryBalance tbl dm im evac 0 0
 
 -- | Assigns a new fresh instance to a cluster; this is not
--- allocation, so no resource checks are done
+-- allocation, so no resource checks are done.
 assignInstance :: Node.List -> Instance.List -> Instance.Instance ->
                   Types.Idx -> Types.Idx ->
                   (Node.List, Instance.List)
@@ -149,7 +149,9 @@ assignInstance nl il inst pdx sdx =
 
 -- * Arbitrary instances
 
+-- | Defines a single DNS name character.
 newtype DNSChar = DNSChar { dnsGetChar::Char }
+
 instance Arbitrary DNSChar where
     arbitrary = do
       x <- elements (['a'..'z'] ++ ['0'..'9'] ++ "_-")
@@ -189,7 +191,11 @@ instance Arbitrary Instance.Instance where
       vcpus <- choose (0, maxCpu)
       return $ Instance.create name mem dsk vcpus run_st [] True pn sn
 
-genNode :: Maybe Int -> Maybe Int -> Gen Node.Node
+-- | Generates an arbitrary node based on sizing information.
+genNode :: Maybe Int -- ^ Minimum node size in terms of units
+        -> Maybe Int -- ^ Maximum node size (when Nothing, bounded
+                     -- just by the max... constants)
+        -> Gen Node.Node
 genNode min_multiplier max_multiplier = do
   let (base_mem, base_dsk, base_cpu) =
           case min_multiplier of
@@ -253,20 +259,28 @@ instance Arbitrary Jobs.OpStatus where
 instance Arbitrary Jobs.JobStatus where
   arbitrary = elements [minBound..maxBound]
 
+newtype SmallRatio = SmallRatio Double deriving Show
+instance Arbitrary SmallRatio where
+    arbitrary = do
+      v <- choose (0, 1)
+      return $ SmallRatio v
+
 -- * Actual tests
 
--- If the list is not just an empty element, and if the elements do
--- not contain commas, then join+split should be idepotent
+-- ** Utils tests
+
+-- | If the list is not just an empty element, and if the elements do
+-- not contain commas, then join+split should be idempotent.
 prop_Utils_commaJoinSplit =
     forAll (arbitrary `suchThat`
             (\l -> l /= [""] && all (not . elem ',') l )) $ \lst ->
     Utils.sepSplit ',' (Utils.commaJoin lst) == lst
 
--- Split and join should always be idempotent
+-- | Split and join should always be idempotent.
 prop_Utils_commaSplitJoin s = Utils.commaJoin (Utils.sepSplit ',' s) == s
 
 -- | fromObjWithDefault, we test using the Maybe monad and an integer
--- value
+-- value.
 prop_Utils_fromObjWithDefault def_value random_key =
     -- a missing key will be returned with the default
     Utils.fromObjWithDefault [] random_key def_value == Just def_value &&
@@ -275,13 +289,16 @@ prop_Utils_fromObjWithDefault def_value random_key =
          random_key (def_value+1) == Just def_value
         where _types = def_value :: Integer
 
+-- | Test list for the Utils module.
 testUtils =
   [ run prop_Utils_commaJoinSplit
   , run prop_Utils_commaSplitJoin
   , run prop_Utils_fromObjWithDefault
   ]
 
--- | Make sure add is idempotent
+-- ** PeerMap tests
+
+-- | Make sure add is idempotent.
 prop_PeerMap_addIdempotent pmap key em =
     fn puniq == fn (fn puniq)
     where _types = (pmap::PeerMap.PeerMap,
@@ -289,33 +306,34 @@ prop_PeerMap_addIdempotent pmap key em =
           fn = PeerMap.add key em
           puniq = PeerMap.accumArray const pmap
 
--- | Make sure remove is idempotent
+-- | Make sure remove is idempotent.
 prop_PeerMap_removeIdempotent pmap key =
     fn puniq == fn (fn puniq)
     where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
           fn = PeerMap.remove key
           puniq = PeerMap.accumArray const pmap
 
--- | Make sure a missing item returns 0
+-- | Make sure a missing item returns 0.
 prop_PeerMap_findMissing pmap key =
     PeerMap.find key (PeerMap.remove key puniq) == 0
     where _types = (pmap::PeerMap.PeerMap, key::PeerMap.Key)
           puniq = PeerMap.accumArray const pmap
 
--- | Make sure an added item is found
+-- | Make sure an added item is found.
 prop_PeerMap_addFind pmap key em =
     PeerMap.find key (PeerMap.add key em puniq) == em
     where _types = (pmap::PeerMap.PeerMap,
                     key::PeerMap.Key, em::PeerMap.Elem)
           puniq = PeerMap.accumArray const pmap
 
--- | Manual check that maxElem returns the maximum indeed, or 0 for null
+-- | Manual check that maxElem returns the maximum indeed, or 0 for null.
 prop_PeerMap_maxElem pmap =
     PeerMap.maxElem puniq == if null puniq then 0
                              else (maximum . snd . unzip) puniq
     where _types = pmap::PeerMap.PeerMap
           puniq = PeerMap.accumArray const pmap
 
+-- | List of tests for the PeerMap module.
 testPeerMap =
     [ run prop_PeerMap_addIdempotent
     , run prop_PeerMap_removeIdempotent
@@ -324,7 +342,7 @@ testPeerMap =
     , run prop_PeerMap_findMissing
     ]
 
--- Container tests
+-- ** Container tests
 
 prop_Container_addTwo cdata i1 i2 =
     fn i1 i2 cont == fn i2 i1 cont &&
@@ -339,9 +357,9 @@ prop_Container_nameOf node =
       fnode = head (Container.elems nl)
   in Container.nameOf nl (Node.idx fnode) == Node.name fnode
 
--- We test that in a cluster, given a random node, we can find it by
+-- | We test that in a cluster, given a random node, we can find it by
 -- its name and alias, as long as all names and aliases are unique,
--- and that we fail to find a non-existing name
+-- and that we fail to find a non-existing name.
 prop_Container_findByName node othername =
   forAll (choose (1, 20)) $ \ cnt ->
   forAll (choose (0, cnt - 1)) $ \ fidx ->
@@ -367,6 +385,8 @@ testContainer =
     , run prop_Container_findByName
     ]
 
+-- ** Instance tests
+
 -- Simple instance tests, we only have setter/getters
 
 prop_Instance_creat inst =
@@ -471,6 +491,8 @@ testInstance =
     , run prop_Instance_setMovable
     ]
 
+-- ** Text backend tests
+
 -- Instance text loader tests
 
 prop_Text_Load_Instance name mem dsk vcpus status
@@ -565,7 +587,7 @@ testText =
     , run prop_Text_NodeLSIdempotent
     ]
 
--- Node tests
+-- ** Node tests
 
 prop_Node_setAlias node name =
     Node.name newnode == Node.name node &&
@@ -585,7 +607,8 @@ prop_Node_setMcpu node mc =
     Node.mCpu newnode == mc
     where newnode = Node.setMcpu node mc
 
--- | Check that an instance add with too high memory or disk will be rejected
+-- | Check that an instance add with too high memory or disk will be
+-- rejected.
 prop_Node_addPriFM node inst = Instance.mem inst >= Node.fMem node &&
                                not (Node.failN1 node)
                                ==>
@@ -615,7 +638,8 @@ prop_Node_addPriFC node inst (Positive extra) =
           inst' = setInstanceSmallerThanNode node inst
           inst'' = inst' { Instance.vcpus = Node.availCpu node + extra }
 
--- | Check that an instance add with too high memory or disk will be rejected
+-- | Check that an instance add with too high memory or disk will be
+-- rejected.
 prop_Node_addSec node inst pdx =
     (Instance.mem inst >= (Node.fMem node - Node.rMem node) ||
      Instance.dsk inst >= Node.fDsk node) &&
@@ -623,7 +647,7 @@ prop_Node_addSec node inst pdx =
     ==> isFailure (Node.addSec node inst pdx)
         where _types = (node::Node.Node, inst::Instance.Instance, pdx::Int)
 
--- | Checks for memory reservation changes
+-- | Checks for memory reservation changes.
 prop_Node_rMem inst =
     forAll (arbitrary `suchThat` ((> 0) . Node.fMem)) $ \node ->
     -- ab = auto_balance, nb = non-auto_balance
@@ -655,13 +679,7 @@ prop_Node_rMem inst =
          x -> printTestCase ("Failed to add/remove instances: " ++ show x)
               False
 
-newtype SmallRatio = SmallRatio Double deriving Show
-instance Arbitrary SmallRatio where
-    arbitrary = do
-      v <- choose (0, 1)
-      return $ SmallRatio v
-
--- | Check mdsk setting
+-- | Check mdsk setting.
 prop_Node_setMdsk node mx =
     Node.loDsk node' >= 0 &&
     fromIntegral (Node.loDsk node') <= Node.tDsk node &&
@@ -715,9 +733,10 @@ testNode =
     ]
 
 
--- Cluster tests
+-- ** Cluster tests
 
--- | Check that the cluster score is close to zero for a homogeneous cluster
+-- | Check that the cluster score is close to zero for a homogeneous
+-- cluster.
 prop_Score_Zero node =
     forAll (choose (1, 1024)) $ \count ->
     (not (Node.offline node) && not (Node.failN1 node) && (count > 0) &&
@@ -730,7 +749,7 @@ prop_Score_Zero node =
     -- this should be much lower than the default score in CLI.hs
     in score <= 1e-12
 
--- | Check that cluster stats are sane
+-- | Check that cluster stats are sane.
 prop_CStats_sane node =
     forAll (choose (1, 1024)) $ \count ->
     (not (Node.offline node) && not (Node.failN1 node) &&
@@ -743,7 +762,7 @@ prop_CStats_sane node =
        Cluster.csAdsk cstats <= Cluster.csFdsk cstats
 
 -- | Check that one instance is allocated correctly, without
--- rebalances needed
+-- rebalances needed.
 prop_ClusterAlloc_sane node inst =
     forAll (choose (5, 20)) $ \count ->
     not (Node.offline node)
@@ -768,7 +787,7 @@ prop_ClusterAlloc_sane node inst =
 
 -- | Checks that on a 2-5 node cluster, we can allocate a random
 -- instance spec via tiered allocation (whatever the original instance
--- spec), on either one or two nodes
+-- spec), on either one or two nodes.
 prop_ClusterCanTieredAlloc node inst =
     forAll (choose (2, 5)) $ \count ->
     forAll (choose (1, 2)) $ \rqnodes ->
@@ -787,7 +806,7 @@ prop_ClusterCanTieredAlloc node inst =
                                       length ixes == length cstats
 
 -- | Checks that on a 4-8 node cluster, once we allocate an instance,
--- we can also evacuate it
+-- we can also evacuate it.
 prop_ClusterAllocEvac node inst =
     forAll (choose (4, 8)) $ \count ->
     not (Node.offline node)
@@ -812,7 +831,7 @@ prop_ClusterAllocEvac node inst =
                _ -> False
 
 -- | Check that allocating multiple instances on a cluster, then
--- adding an empty node, results in a valid rebalance
+-- adding an empty node, results in a valid rebalance.
 prop_ClusterAllocBalance =
     forAll (genNode (Just 5) (Just 128)) $ \node ->
     forAll (choose (3, 5)) $ \count ->
@@ -831,7 +850,7 @@ prop_ClusterAllocBalance =
                        tbl = Cluster.Table ynl il' cv []
                    in canBalance tbl True True False
 
--- | Checks consistency
+-- | Checks consistency.
 prop_ClusterCheckConsistency node inst =
   let nl = makeSmallCluster node 3
       [node1, node2, node3] = Container.elems nl
@@ -845,7 +864,7 @@ prop_ClusterCheckConsistency node inst =
      null (ccheck [(0, inst2)]) &&
      (not . null $ ccheck [(0, inst3)])
 
--- For now, we only test that we don't lose instances during the split
+-- | For now, we only test that we don't lose instances during the split.
 prop_ClusterSplitCluster node inst =
   forAll (choose (0, 100)) $ \icnt ->
   let nl = makeSmallCluster node 2
@@ -867,8 +886,9 @@ testCluster =
     , run prop_ClusterSplitCluster
     ]
 
--- | Check that opcode serialization is idempotent
+-- ** OpCodes tests
 
+-- | Check that opcode serialization is idempotent.
 prop_OpCodes_serialization op =
   case J.readJSON (J.showJSON op) of
     J.Error _ -> False
@@ -879,7 +899,9 @@ testOpCodes =
   [ run prop_OpCodes_serialization
   ]
 
--- | Check that (queued) job\/opcode status serialization is idempotent
+-- ** Jobs tests
+
+-- | Check that (queued) job\/opcode status serialization is idempotent.
 prop_OpStatus_serialization os =
   case J.readJSON (J.showJSON os) of
     J.Error _ -> False
@@ -897,7 +919,7 @@ testJobs =
   , run prop_JobStatus_serialization
   ]
 
--- | Loader tests
+-- ** Loader tests
 
 prop_Loader_lookupNode ktn inst node =
   Loader.lookupNode nl inst node == Data.Map.lookup node nl
@@ -915,9 +937,8 @@ prop_Loader_assignIndices nodes =
    else True)
   where (nassoc, kt) = Loader.assignIndices (map (\n -> (Node.name n, n)) nodes)
 
-
 -- | Checks that the number of primary instances recorded on the nodes
--- is zero
+-- is zero.
 prop_Loader_mergeData ns =
   let na = Container.fromList $ map (\n -> (Node.idx n, n)) ns
   in case Loader.mergeData [] [] [] []
diff --git a/htools/Ganeti/HTools/Rapi.hs b/htools/Ganeti/HTools/Rapi.hs
index 444934d201652e8e31d297eaf3d8651653e6e9b1..19a3a206b4e2ebc3c7cdae671a0f93c18510f89f 100644
--- a/htools/Ganeti/HTools/Rapi.hs
+++ b/htools/Ganeti/HTools/Rapi.hs
@@ -57,7 +57,7 @@ getUrl _ = return $ fail "RAPI/curl backend disabled at compile time"
 
 #else
 
--- | The curl options we use
+-- | The curl options we use.
 curlOpts :: [CurlOption]
 curlOpts = [ CurlSSLVerifyPeer False
            , CurlSSLVerifyHost 0
@@ -97,6 +97,7 @@ getGroups :: String -> Result [(String, Group.Group)]
 getGroups body = loadJSArray "Parsing group data" body >>=
                 mapM (parseGroup . fromJSObject)
 
+-- | Generates a fake group list.
 getFakeGroups :: Result [(String, Group.Group)]
 getFakeGroups =
   return [(defaultGroupID,
@@ -173,7 +174,7 @@ readData master = do
   tags_body <- getUrl $ printf "%s/2/tags" url
   return (group_body, node_body, inst_body, tags_body)
 
--- | Builds the cluster data from the raw Rapi content
+-- | Builds the cluster data from the raw Rapi content.
 parseData :: (Result String, Result String, Result String, Result String)
           -> Result ClusterData
 parseData (group_body, node_body, inst_body, tags_body) = do
@@ -191,7 +192,7 @@ parseData (group_body, node_body, inst_body, tags_body) = do
   tags_data <- tags_body >>= (fromJResult "Parsing tags data" . decodeStrict)
   return (ClusterData group_idx node_idx inst_idx tags_data)
 
--- | Top level function for data loading
+-- | Top level function for data loading.
 loadData :: String -- ^ Cluster or URL to use as source
          -> IO (Result ClusterData)
 loadData = fmap parseData . readData
diff --git a/htools/Ganeti/HTools/Simu.hs b/htools/Ganeti/HTools/Simu.hs
index a763076f259c594bfe4c5ffe07097e5ea71210cf..c1848499b2a7caa942acdc05f313c4c343c66306 100644
--- a/htools/Ganeti/HTools/Simu.hs
+++ b/htools/Ganeti/HTools/Simu.hs
@@ -1,4 +1,4 @@
-{-| Parsing data from a simulated description of the cluster
+{-| Parsing data from a simulated description of the cluster.
 
 This module holds the code for parsing a cluster description.
 
diff --git a/htools/Ganeti/HTools/Text.hs b/htools/Ganeti/HTools/Text.hs
index a52665a75cf71870a9e60de5d2d5f6e260938b83..05eff2d534037471d4ddfc766e8722eb4aca1b8f 100644
--- a/htools/Ganeti/HTools/Text.hs
+++ b/htools/Ganeti/HTools/Text.hs
@@ -1,7 +1,7 @@
-{-| Parsing data from text-files
+{-| Parsing data from text files.
 
 This module holds the code for loading the cluster state from text
-files, as produced by gnt-node and gnt-instance list.
+files, as produced by the @gnt-node list@ and @gnt-instance list@ commands.
 
 -}
 
@@ -51,18 +51,22 @@ import qualified Ganeti.HTools.Group as Group
 import qualified Ganeti.HTools.Node as Node
 import qualified Ganeti.HTools.Instance as Instance
 
--- | Serialize a single group
+-- * Serialisation functions
+
+-- | Serialize a single group.
 serializeGroup :: Group.Group -> String
 serializeGroup grp =
     printf "%s|%s|%s" (Group.name grp) (Group.uuid grp)
                (apolToString (Group.allocPolicy grp))
 
--- | Generate group file data from a group list
+-- | Generate group file data from a group list.
 serializeGroups :: Group.List -> String
 serializeGroups = unlines . map serializeGroup . Container.elems
 
--- | Serialize a single node
-serializeNode :: Group.List -> Node.Node -> String
+-- | Serialize a single node.
+serializeNode :: Group.List -- ^ The list of groups (needed for group uuid)
+              -> Node.Node  -- ^ The node to be serialised
+              -> String
 serializeNode gl node =
     printf "%s|%.0f|%d|%d|%.0f|%d|%.0f|%c|%s" (Node.name node)
                (Node.tMem node) (Node.nMem node) (Node.fMem node)
@@ -71,12 +75,15 @@ serializeNode gl node =
                (Group.uuid grp)
     where grp = Container.find (Node.group node) gl
 
--- | Generate node file data from node objects
+-- | Generate node file data from node objects.
 serializeNodes :: Group.List -> Node.List -> String
 serializeNodes gl = unlines . map (serializeNode gl) . Container.elems
 
--- | Serialize a single instance
-serializeInstance :: Node.List -> Instance.Instance -> String
+-- | Serialize a single instance.
+serializeInstance :: Node.List         -- ^ The node list (needed for
+                                       -- node names)
+                  -> Instance.Instance -- ^ The instance to be serialised
+                  -> String
 serializeInstance nl inst =
     let
         iname = Instance.name inst
@@ -92,12 +99,12 @@ serializeInstance nl inst =
              (if Instance.auto_balance inst then "Y" else "N")
              pnode snode (intercalate "," (Instance.tags inst))
 
--- | Generate instance file data from instance objects
+-- | Generate instance file data from instance objects.
 serializeInstances :: Node.List -> Instance.List -> String
 serializeInstances nl =
     unlines . map (serializeInstance nl) . Container.elems
 
--- | Generate complete cluster data from node and instance lists
+-- | Generate complete cluster data from node and instance lists.
 serializeCluster :: ClusterData -> String
 serializeCluster (ClusterData gl nl il ctags) =
   let gdata = serializeGroups gl
@@ -106,8 +113,12 @@ serializeCluster (ClusterData gl nl il ctags) =
   -- note: not using 'unlines' as that adds too many newlines
   in intercalate "\n" [gdata, ndata, idata, unlines ctags]
 
+-- * Parsing functions
+
 -- | Load a group from a field list.
-loadGroup :: (Monad m) => [String] -> m (String, Group.Group)
+loadGroup :: (Monad m) => [String]
+          -> m (String, Group.Group) -- ^ The result, a tuple of group
+                                     -- UUID and group object
 loadGroup [name, gid, apol] = do
   xapol <- apolFromString apol
   return (gid, Group.create name gid xapol)
@@ -115,7 +126,11 @@ loadGroup [name, gid, apol] = do
 loadGroup s = fail $ "Invalid/incomplete group data: '" ++ show s ++ "'"
 
 -- | Load a node from a field list.
-loadNode :: (Monad m) => NameAssoc -> [String] -> m (String, Node.Node)
+loadNode :: (Monad m) =>
+            NameAssoc             -- ^ Association list with current groups
+         -> [String]              -- ^ Input data as a list of fields
+         -> m (String, Node.Node) -- ^ The result, a tuple of node name
+                                  -- and node object
 loadNode ktg [name, tm, nm, fm, td, fd, tc, fo, gu] = do
   gdx <- lookupGroup ktg name gu
   new_node <-
@@ -134,7 +149,13 @@ loadNode _ s = fail $ "Invalid/incomplete node data: '" ++ show s ++ "'"
 
 -- | Load an instance from a field list.
 loadInst :: (Monad m) =>
-            NameAssoc -> [String] -> m (String, Instance.Instance)
+            NameAssoc                     -- ^ Association list with
+                                          -- the current nodes
+         -> [String]                      -- ^ Input data as a list of
+                                          -- fields
+         -> m (String, Instance.Instance) -- ^ The result, a tuple of
+                                          -- instance name and the
+                                          -- instance object
 loadInst ktn [name, mem, dsk, vcpus, status, auto_bal, pnode, snode, tags] = do
   pidx <- lookupNode ktn name pnode
   sidx <- (if null snode then return Node.noSecondary
@@ -161,16 +182,26 @@ loadInst _ s = fail $ "Invalid/incomplete instance data: '" ++ show s ++ "'"
 -- @gnt-instance list@ and @gnt-node list@ to a list of objects using
 -- a supplied conversion function.
 loadTabular :: (Monad m, Element a) =>
-               [String] -> ([String] -> m (String, a))
-            -> m (NameAssoc, Container.Container a)
+               [String] -- ^ Input data, as a list of lines
+            -> ([String] -> m (String, a)) -- ^ Conversion function
+            -> m ( NameAssoc
+                 , Container.Container a ) -- ^ A tuple of an
+                                           -- association list (name
+                                           -- to object) and a set as
+                                           -- used in
+                                           -- "Ganeti.HTools.Container"
+
 loadTabular lines_data convert_fn = do
   let rows = map (sepSplit '|') lines_data
   kerows <- mapM convert_fn rows
   return $ assignIndices kerows
 
 -- | Load the cluser data from disk.
-readData :: String -- ^ Path to the text file
-         -> IO String
+--
+-- This is an alias for 'readFile', just for consistency with the other
+-- modules.
+readData :: String    -- ^ Path to the text file
+         -> IO String -- ^ Contents of the file
 readData = readFile
 
 -- | Builds the cluster data from text input.
@@ -192,7 +223,7 @@ parseData fdata = do
   {- the tags are simply line-based, no processing needed -}
   return (ClusterData gl nl il ctags)
 
--- | Top level function for data loading
+-- | Top level function for data loading.
 loadData :: String -- ^ Path to the text file
          -> IO (Result ClusterData)
 loadData = fmap parseData . readData
diff --git a/htools/Ganeti/HTools/Types.hs b/htools/Ganeti/HTools/Types.hs
index 245e24f145e60c671e25e2fe7b423c70fce99ee9..be7ec18fcbd1c99e869d1df75f395c56799fc4bc 100644
--- a/htools/Ganeti/HTools/Types.hs
+++ b/htools/Ganeti/HTools/Types.hs
@@ -102,7 +102,7 @@ data AllocPolicy
                        -- allocations
       deriving (Show, Read, Eq, Ord)
 
--- | Convert a string to an alloc policy
+-- | Convert a string to an alloc policy.
 apolFromString :: (Monad m) => String -> m AllocPolicy
 apolFromString s =
     case () of
@@ -111,7 +111,7 @@ apolFromString s =
         | s == C.allocPolicyUnallocable -> return AllocUnallocable
         | otherwise -> fail $ "Invalid alloc policy mode: " ++ s
 
--- | Convert an alloc policy to the Ganeti string equivalent
+-- | Convert an alloc policy to the Ganeti string equivalent.
 apolToString :: AllocPolicy -> String
 apolToString AllocPreferred   = C.allocPolicyPreferred
 apolToString AllocLastResort  = C.allocPolicyLastResort
@@ -140,19 +140,23 @@ data DynUtil = DynUtil
     , netWeight :: Weight -- ^ Standardised network usage
     } deriving (Show, Read, Eq)
 
--- | Initial empty utilisation
+-- | Initial empty utilisation.
 zeroUtil :: DynUtil
 zeroUtil = DynUtil { cpuWeight = 0, memWeight = 0
                    , dskWeight = 0, netWeight = 0 }
 
+-- | Base utilisation (used when no actual utilisation data is
+-- supplied).
 baseUtil :: DynUtil
 baseUtil = DynUtil { cpuWeight = 1, memWeight = 1
                    , dskWeight = 1, netWeight = 1 }
 
+-- | Sum two utilisation records.
 addUtil :: DynUtil -> DynUtil -> DynUtil
 addUtil (DynUtil a1 a2 a3 a4) (DynUtil b1 b2 b3 b4) =
     DynUtil (a1+b1) (a2+b2) (a3+b3) (a4+b4)
 
+-- | Subtracts one utilisation record from another.
 subUtil :: DynUtil -> DynUtil -> DynUtil
 subUtil (DynUtil a1 a2 a3 a4) (DynUtil b1 b2 b3 b4) =
     DynUtil (a1-b1) (a2-b2) (a3-b3) (a4-b4)
@@ -162,7 +166,7 @@ subUtil (DynUtil a1 a2 a3 a4) (DynUtil b1 b2 b3 b4) =
 -- performed and the score of the cluster after the move.
 type Placement = (Idx, Ndx, Ndx, IMove, Score)
 
--- | An instance move definition
+-- | An instance move definition.
 data IMove = Failover                -- ^ Failover the instance (f)
            | ReplacePrimary Ndx      -- ^ Replace primary (f, r:np, f)
            | ReplaceSecondary Ndx    -- ^ Replace secondary (r:ns)
@@ -171,14 +175,14 @@ data IMove = Failover                -- ^ Failover the instance (f)
              deriving (Show, Read)
 
 -- | Formatted solution output for one move (involved nodes and
--- commands
+-- commands).
 type MoveJob = ([Ndx], Idx, IMove, [String])
 
--- | Unknown field in table output
+-- | Unknown field in table output.
 unknownField :: String
 unknownField = "<unknown field>"
 
--- | A list of command elements
+-- | A list of command elements.
 type JobSet = [MoveJob]
 
 -- | Connection timeout (when using non-file methods).
@@ -211,7 +215,7 @@ unitCpu = 1
 
 {-|
 
-This is similar to the JSON library Result type - *very* similar, but
+This is similar to the JSON library Result type - /very/ similar, but
 we want to use it in multiple places, so we abstract it into a
 mini-library here
 
@@ -227,16 +231,16 @@ instance Monad Result where
     return = Ok
     fail = Bad
 
--- | Simple checker for whether Result is OK
+-- | Simple checker for whether a 'Result' is OK.
 isOk :: Result a -> Bool
 isOk (Ok _) = True
 isOk _ = False
 
--- | Simple checker for whether Result is a failure
+-- | Simple checker for whether a 'Result' is a failure.
 isBad :: Result a  -> Bool
 isBad = not . isOk
 
--- | Reason for an operation's falure
+-- | Reason for an operation's failure.
 data FailMode = FailMem  -- ^ Failed due to not enough RAM
               | FailDisk -- ^ Failed due to not enough disk
               | FailCPU  -- ^ Failed due to not enough CPU capacity
@@ -244,10 +248,10 @@ data FailMode = FailMem  -- ^ Failed due to not enough RAM
               | FailTags -- ^ Failed due to tag exclusion
                 deriving (Eq, Enum, Bounded, Show, Read)
 
--- | List with failure statistics
+-- | List with failure statistics.
 type FailStats = [(FailMode, Int)]
 
--- | Either-like data-type customized for our failure modes
+-- | Either-like data-type customized for our failure modes.
 data OpResult a = OpFail FailMode -- ^ Failed operation
                 | OpGood a        -- ^ Success operation
                   deriving (Show, Read)
@@ -268,7 +272,7 @@ class Element a where
     -- | Updates the alias of the element
     setAlias :: a -> String -> a
     -- | Compute the alias by stripping a given suffix (domain) from
-    -- | the name
+    -- the name
     computeAlias :: String -> a -> a
     computeAlias dom e = setAlias e alias
         where alias = take (length name - length dom) name
diff --git a/htools/Ganeti/HTools/Utils.hs b/htools/Ganeti/HTools/Utils.hs
index efac41e4f7d7eb19d7b46ce18b299a32e4bbb099..efc3d62d13ec9f481110d1a7babab554e7b2d8cf 100644
--- a/htools/Ganeti/HTools/Utils.hs
+++ b/htools/Ganeti/HTools/Utils.hs
@@ -1,4 +1,4 @@
-{-| Utility functions -}
+{-| Utility functions. -}
 
 {-
 
@@ -62,15 +62,16 @@ import Ganeti.HTools.Types
 debug :: Show a => a -> a
 debug x = trace (show x) x
 
--- | Displays a modified form of the second parameter before returning it
+-- | Displays a modified form of the second parameter before returning
+-- it.
 debugFn :: Show b => (a -> b) -> a -> a
 debugFn fn x = debug (fn x) `seq` x
 
--- | Show the first parameter before returning the second one
+-- | Show the first parameter before returning the second one.
 debugXy :: Show a => a -> b -> b
 debugXy a b = debug a `seq` b
 
--- * Miscelaneous
+-- * Miscellaneous
 
 -- | Comma-join a string list.
 commaJoin :: [String] -> String
@@ -91,7 +92,7 @@ sepSplit sep s
 -- Simple and slow statistical functions, please replace with better
 -- versions
 
--- | Standard deviation function
+-- | Standard deviation function.
 stdDev :: [Double] -> Double
 stdDev lst =
   -- first, calculate the list length and sum lst in a single step,
@@ -107,7 +108,7 @@ stdDev lst =
 
 -- * JSON-related functions
 
--- | A type alias for the list-based representation of J.JSObject
+-- | A type alias for the list-based representation of J.JSObject.
 type JSRecord = [(String, J.JSValue)]
 
 -- | Converts a JSON Result into a monadic value.
@@ -153,7 +154,7 @@ fromObjWithDefault :: (J.JSON a, Monad m) =>
                       JSRecord -> String -> a -> m a
 fromObjWithDefault o k d = liftM (fromMaybe d) $ maybeFromObj o k
 
--- | Reads a JValue, that originated from an object key
+-- | Reads a JValue that originated from an object key.
 fromKeyValue :: (J.JSON a, Monad m)
               => String     -- ^ The key name
               -> J.JSValue  -- ^ The value to read
@@ -161,13 +162,13 @@ fromKeyValue :: (J.JSON a, Monad m)
 fromKeyValue k val =
   fromJResult (printf "key '%s', value '%s'" k (show val)) (J.readJSON val)
 
--- | Annotate a Result with an ownership information
+-- | Annotate a Result with ownership information.
 annotateResult :: String -> Result a -> Result a
 annotateResult owner (Bad s) = Bad $ owner ++ ": " ++ s
 annotateResult _ v = v
 
 -- | Try to extract a key from a object with better error reporting
--- than fromObj
+-- than fromObj.
 tryFromObj :: (J.JSON a) =>
               String     -- ^ Textual "owner" in error messages
            -> JSRecord   -- ^ The object array
@@ -194,7 +195,7 @@ asObjectList = mapM asJSObject
 
 -- * Parsing utility functions
 
--- | Parse results from readsPrec
+-- | Parse results from readsPrec.
 parseChoices :: (Monad m, Read a) => String -> String -> [(a, String)] -> m a
 parseChoices _ _ ((v, ""):[]) = return v
 parseChoices name s ((_, e):[]) =
@@ -206,7 +207,7 @@ parseChoices name s _ = fail $ name ++ ": cannot parse string '" ++ s ++ "'"
 tryRead :: (Monad m, Read a) => String -> String -> m a
 tryRead name s = parseChoices name s $ reads s
 
--- | Format a table of strings to maintain consistent length
+-- | Format a table of strings to maintain consistent length.
 formatTable :: [[String]] -> [Bool] -> [[String]]
 formatTable vals numpos =
     let vtrans = transpose vals  -- transpose, so that we work on rows
@@ -225,6 +226,6 @@ formatTable vals numpos =
                     ) (zip3 vtrans numpos mlens)
    in transpose expnd
 
--- | Default group UUID (just a string, not a real UUID)
+-- | Default group UUID (just a string, not a real UUID).
 defaultGroupID :: GroupID
 defaultGroupID = "00000000-0000-0000-0000-000000000000"
diff --git a/htools/Ganeti/Jobs.hs b/htools/Ganeti/Jobs.hs
index 3184a5ac5569c3dfb273db57dda7d2db91c5389f..cec266bd82826377c9c9bb09960635825712e1da 100644
--- a/htools/Ganeti/Jobs.hs
+++ b/htools/Ganeti/Jobs.hs
@@ -33,6 +33,7 @@ import qualified Text.JSON as J
 
 import qualified Ganeti.Constants as C
 
+-- | Our ADT for the OpCode status at runtime (while in a job).
 data OpStatus = OP_STATUS_QUEUED
               | OP_STATUS_WAITLOCK
               | OP_STATUS_CANCELING
diff --git a/htools/Ganeti/Luxi.hs b/htools/Ganeti/Luxi.hs
index 56024a583646189ed77f180134f0c04518989b14..f475245a8963674cc54b8ddb40fedc2510433565 100644
--- a/htools/Ganeti/Luxi.hs
+++ b/htools/Ganeti/Luxi.hs
@@ -161,7 +161,7 @@ recvMsg s = do
   writeIORef (rbuf s) nbuf
   return msg
 
--- | Compute the serialized form of a Luxi operation
+-- | Compute the serialized form of a Luxi operation.
 opToArgs :: LuxiOp -> JSValue
 opToArgs (QueryNodes names fields lock) = J.showJSON (names, fields, lock)
 opToArgs (QueryGroups names fields lock) = J.showJSON (names, fields, lock)
diff --git a/htools/Ganeti/OpCodes.hs b/htools/Ganeti/OpCodes.hs
index 491ecda78bc900a47af7f25b8fa1b624590a98ae..6cce74380a17f0dede2114d33b20ecc25d24b87a 100644
--- a/htools/Ganeti/OpCodes.hs
+++ b/htools/Ganeti/OpCodes.hs
@@ -36,6 +36,7 @@ import Text.JSON.Types
 
 import Ganeti.HTools.Utils
 
+-- | Replace disks type.
 data ReplaceDisksMode = ReplaceOnPrimary
                   | ReplaceOnSecondary
                   | ReplaceNewSecondary
@@ -55,6 +56,10 @@ instance JSON ReplaceDisksMode where
                    J.Ok "replace_auto" -> J.Ok ReplaceAuto
                    _ -> J.Error "Can't parse a valid ReplaceDisksMode"
 
+-- | OpCode representation.
+--
+-- We only implement a subset of Ganeti opcodes: those actually
+-- used in the htools codebase.
 data OpCode = OpTestDelay Double Bool [String]
             | OpInstanceReplaceDisks String (Maybe String) ReplaceDisksMode
               [Int] (Maybe String)
@@ -63,12 +68,14 @@ data OpCode = OpTestDelay Double Bool [String]
             deriving (Show, Read, Eq)
 
 
+-- | Computes the OP_ID for an OpCode.
 opID :: OpCode -> String
 opID (OpTestDelay _ _ _) = "OP_TEST_DELAY"
 opID (OpInstanceReplaceDisks _ _ _ _ _) = "OP_INSTANCE_REPLACE_DISKS"
 opID (OpInstanceFailover _ _) = "OP_INSTANCE_FAILOVER"
 opID (OpInstanceMigrate _ _ _ _) = "OP_INSTANCE_MIGRATE"
 
+-- | Loads an OpCode from the JSON serialised form.
 loadOpCode :: JSValue -> J.Result OpCode
 loadOpCode v = do
   o <- liftM J.fromJSObject (readJSON v)
@@ -99,6 +106,7 @@ loadOpCode v = do
                  return $ OpInstanceMigrate inst live cleanup allow_failover
     _ -> J.Error $ "Unknown opcode " ++ op_id
 
+-- | Serialises an opcode to JSON.
 saveOpCode :: OpCode -> JSValue
 saveOpCode op@(OpTestDelay duration on_master on_nodes) =
     let ol = [ ("OP_ID", showJSON $ opID op)
diff --git a/htools/hail.hs b/htools/hail.hs
index 75f4490990e6fcebde52a868e6b7a75e23545559..7a1c6817e504b36fb39f4b3601e61391a42654ca 100644
--- a/htools/hail.hs
+++ b/htools/hail.hs
@@ -1,4 +1,4 @@
-{-| Solver for N+1 cluster errors
+{-| IAllocator plugin for Ganeti.
 
 -}
 
diff --git a/htools/hbal.hs b/htools/hbal.hs
index 47696aec722db8a0fa335089fdbe390f0aa35e4b..432dfe32c2e96a079d5973515a91305cbd2b67c6 100644
--- a/htools/hbal.hs
+++ b/htools/hbal.hs
@@ -1,4 +1,4 @@
-{-| Cluster rebalancer
+{-| Cluster rebalancer.
 
 -}
 
diff --git a/htools/test.hs b/htools/test.hs
index 228ef914debc6e4cacfa12a3508384c02d2dfb61..f0220ef850356cbe2e1402bcb60cad7b66074fd1 100644
--- a/htools/test.hs
+++ b/htools/test.hs
@@ -1,4 +1,4 @@
-{-| Unittest runner for ganeti-htools
+{-| Unittest runner for ganeti-htools.
 
 -}