Commit 866e1f76 authored by Klaus Aehlig

Merge branch 'stable-2.10' into stable-2.11

* stable-2.10
  Test parallel instance ops and plain instances
  Test parallel creation of DRBD instances
  Test parallel job submission performance
  Test parallel instance query operations
  Test parallel instance operations
  Test parallel instance modification
  Test parallel node-count instance creation
  Test parallel instance creation and removal
  Fail in replace-disks if attaching disks fails
  Add a basic test for --restricted-migration
  Describe the --restricted-migration option
  Support restricted migration
  Add an option for restricted migration
  Add an example for node evacuation
  Add a test for parsing version strings

* stable-2.9
  KVM: set IFF_ONE_QUEUE on created tap interfaces
  Add configure option to pass GHC flags

Conflicts:
	Makefile.am
	configure.ac
	qa/qa_utils.py
Resolution:
	Take ALL the additions
Semantical conflict:
	make hsqueeze use the modified tryBalance signature
Signed-off-by: Klaus Aehlig <aehlig@google.com>
Reviewed-by: Petr Pudlak <pudlak@google.com>
parents 89922d01 c2a97cee
......@@ -658,6 +658,8 @@ HTEST_FLAGS = $(HFLAGS) -fhpc -itest/hs \
HEXTRA =
# internal extra flags (used for test/hs/htest mainly)
HEXTRA_INT =
# combination of HEXTRA and HEXTRA_CONFIGURE
HEXTRA_COMBINED = $(HEXTRA) $(HEXTRA_CONFIGURE)
# exclude options for coverage reports
HPCEXCL = --exclude Main \
--exclude Ganeti.Constants \
......@@ -1032,26 +1034,26 @@ endif
Makefile.ghc: $(HS_MAKEFILE_GHC_SRCS) Makefile \
| $(built_base_sources) $(HS_BUILT_SRCS)
$(GHC) -M -dep-makefile $@ -dep-suffix $(HTEST_SUFFIX) $(HFLAGS) -itest/hs \
$(HS_PARALLEL3) $(HS_REGEX_PCRE) $(HEXTRA) $(HS_MAKEFILE_GHC_SRCS)
$(HS_PARALLEL3) $(HS_REGEX_PCRE) $(HEXTRA_COMBINED) $(HS_MAKEFILE_GHC_SRCS)
@include_makefile_ghc@
%.o:
@echo '[GHC]: $@ <- $^'
@$(GHC) -c $(HFLAGS) \
$(HS_PARALLEL3) $(HS_REGEX_PCRE) $(HEXTRA) $(@:%.o=%.hs)
$(HS_PARALLEL3) $(HS_REGEX_PCRE) $(HEXTRA_COMBINED) $(@:%.o=%.hs)
%.$(HTEST_SUFFIX)_o:
@echo '[GHC]: $@ <- $^'
@$(GHC) -c $(HTEST_FLAGS) \
$(HS_PARALLEL3) $(HS_REGEX_PCRE) $(HEXTRA) $(@:%.$(HTEST_SUFFIX)_o=%.hs)
$(HS_PARALLEL3) $(HS_REGEX_PCRE) $(HEXTRA_COMBINED) $(@:%.$(HTEST_SUFFIX)_o=%.hs)
%.hi: %.o ;
%.$(HTEST_SUFFIX)_hi: %.$(HTEST_SUFFIX)_o ;
$(HS_SRC_PROGS): %: %.o | stamp-directories
$(GHC) $(HFLAGS) \
$(HS_PARALLEL3) $(HS_REGEX_PCRE) $(HEXTRA) --make $(@:%=%.hs)
$(HS_PARALLEL3) $(HS_REGEX_PCRE) $(HEXTRA_COMBINED) --make $(@:%=%.hs)
@rm -f $(notdir $@).tix
@touch "$@"
......@@ -1063,7 +1065,7 @@ $(HS_TEST_PROGS): %: %.$(HTEST_SUFFIX)_o \
exit 1; \
fi
$(GHC) $(HTEST_FLAGS) \
$(HS_PARALLEL3) $(HS_REGEX_PCRE) $(HEXTRA) --make $(@:%=%.hs)
$(HS_PARALLEL3) $(HS_REGEX_PCRE) $(HEXTRA_COMBINED) --make $(@:%=%.hs)
@rm -f $(notdir $@).tix
@touch "$@"
......@@ -1279,6 +1281,7 @@ TEST_FILES = \
test/data/htools/hail-node-evac.json \
test/data/htools/hail-reloc-drbd.json \
test/data/htools/hbal-dyn.data \
test/data/htools/hbal-evac.data \
test/data/htools/hbal-excl-tags.data \
test/data/htools/hbal-split-insts.data \
test/data/htools/hspace-tiered-dualspec-exclusive.data \
......@@ -1311,6 +1314,7 @@ TEST_FILES = \
test/hs/shelltests/htools-dynutil.test \
test/hs/shelltests/htools-excl.test \
test/hs/shelltests/htools-hail.test \
test/hs/shelltests/htools-hbal-evac.test \
test/hs/shelltests/htools-hroller.test \
test/hs/shelltests/htools-hspace.test \
test/hs/shelltests/htools-hsqueeze.test \
......
......@@ -108,6 +108,15 @@ AC_ARG_ENABLE([haskell-tests],
AC_SUBST(HTEST, $HTEST)
AM_CONDITIONAL([HTEST], [test "$HTEST" = yes])
# --with-haskell-flags=
AC_ARG_WITH([haskell-flags],
[AS_HELP_STRING([--with-haskell-flags=FLAGS],
[Extra flags to pass to GHC]
)],
[hextra_configure="$withval"],
[hextra_configure=""])
AC_SUBST(HEXTRA_CONFIGURE, $hextra_configure)
# --with-ssh-initscript=...
AC_ARG_WITH([ssh-initscript],
[AS_HELP_STRING([--with-ssh-initscript=SCRIPT],
......
......@@ -67,7 +67,7 @@ The following tests are added to the QA:
  return within a reasonably low timeout.
* For the maximum amount of instances in the cluster, submit add-,
remove- and list-tags jobs.
* Submit 200 `gnt-debug delay` jobs with a delay of 1 seconds. To
* Submit 200 `gnt-debug delay` jobs with a delay of 0.1 seconds. To
speed up submission, perform multiple job submissions in parallel.
Verify that submitting jobs doesn't significantly slow down during
the process. Verify that querying cluster information over CLI and
......@@ -89,10 +89,14 @@ The following tests are added to the QA:
* Submitting twice as many instance creation requests as there are
nodes in the cluster, using DRBD as disk template. As soon as a
creation job succeeds, submit a removal job for this instance.
* Create an instance using DRBD. Fail it over, migrate it, recreate
its disk, change its secondary node, reboot it and reinstall it
while creating an additional instance in parallel to each of those
operations.
* Submitting twice as many instance creation requests as there are
nodes in the cluster, using Plain as disk template. As soon as a
creation job succeeds, submit a removal job for this instance.
This test can make better use of parallelism because only one
node must be locked for an instance creation.
* Create an instance using DRBD. Fail it over, migrate it, change
its secondary node, reboot it and reinstall it while creating an
additional instance in parallel to each of those operations.
Future work
===========
......
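The 200-job submission test described in the design-document hunk above is implemented in the new qa_performance module, whose diff is not expanded in this view. As a rough sketch of how such a test can be driven, assuming the ExecuteJobProducingCommand helper from qa_job_utils shown further down (the thread count, the use of --submit and the exact gnt-debug invocation are illustrative, not the actual qa_performance code):

    import threading

    from qa_job_utils import ExecuteJobProducingCommand

    def SubmitDelayJobs(n_jobs=200, n_threads=10, delay=0.1):
      """Submit n_jobs 'gnt-debug delay' jobs from n_threads worker threads."""
      def _Worker(count):
        for _ in range(count):
          # Each call submits one short delay job; whether the helper also
          # returns the parsed job id is not shown in this diff.
          ExecuteJobProducingCommand(["gnt-debug", "delay", "--submit",
                                      str(delay)])

      threads = [threading.Thread(target=_Worker, args=(n_jobs // n_threads,))
                 for _ in range(n_threads)]
      for t in threads:
        t.start()
      for t in threads:
        t.join()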
......@@ -2581,10 +2581,10 @@ class TLReplaceDisks(Tasklet):
for to_node, to_result in result.items():
msg = to_result.fail_msg
if msg:
self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
self.cfg.GetNodeName(to_node), msg,
hint=("please do a gnt-instance info to see the"
" status of disks"))
raise errors.OpExecError(
"Can't attach drbd disks on node %s: %s (please do a gnt-instance "
"info to see the status of disks)" %
(self.cfg.GetNodeName(to_node), msg))
cstep = itertools.count(5)
......
......@@ -71,6 +71,7 @@ TUNGETIFF = 0x800454d2
TUNGETFEATURES = 0x800454cf
IFF_TAP = 0x0002
IFF_NO_PI = 0x1000
IFF_ONE_QUEUE = 0x2000
IFF_VNET_HDR = 0x4000
#: SPICE parameters which depend on L{constants.HV_KVM_SPICE_BIND}
......@@ -306,7 +307,7 @@ def _OpenTap(vnet_hdr=True):
except EnvironmentError:
raise errors.HypervisorError("Failed to open /dev/net/tun")
flags = IFF_TAP | IFF_NO_PI
flags = IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE
if vnet_hdr and _ProbeTapVnetHdr(tapfd):
flags |= IFF_VNET_HDR
......
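For context, the flags assembled in _OpenTap above end up in the TUNSETIFF ioctl that actually creates the tap interface. A minimal sketch of that step using the standard Linux tun/tap API (the ioctl number and the ifreq packing come from the kernel interface, not from this diff, and the function below is not Ganeti's code):

    import fcntl
    import struct

    TUNSETIFF = 0x400454ca  # standard Linux ioctl for configuring a tun/tap fd
    IFF_TAP = 0x0002
    IFF_NO_PI = 0x1000
    IFF_ONE_QUEUE = 0x2000  # the flag added by this commit

    def ConfigureTap(tapfd, ifname=""):
      """Turn an open /dev/net/tun fd into a single-queue tap device (sketch)."""
      flags = IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE
      # struct ifreq: 16 bytes of interface name followed by a 16-bit flags word
      ifr = struct.pack("16sH", ifname, flags)
      fcntl.ioctl(tapfd, TUNSETIFF, ifr)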
......@@ -33,6 +33,7 @@ Algorithm options:
**[ \--ignore-dynu ]**
**[ \--mond *yes|no* ]**
**[ \--evac-mode ]**
**[ \--restricted-migration ]**
**[ \--select-instances *inst...* ]**
**[ \--exclude-instances *inst...* ]**
......@@ -307,6 +308,15 @@ The options that can be passed to the program are as follows:
(bulk) replacement for Ganeti's own *gnt-node evacuate*, with the
note that it doesn't guarantee full evacuation.
\--restricted-migration
This parameter disallows any replace-primary moves (frf), as well as
those replace-and-failover moves (rf) where the primary node of the
instance is not drained. If used together with the ``--evac-mode``
option, the only migrations that hbal will do are migrations of
instances off a drained node. This can be useful if, during a reinstall
of the base operating system, migration is only possible from the old
OS to the new OS.
\--select-instances=*instances*
This parameter marks the given instances (as a comma-separated list)
as the only ones being moved during the rebalance.
......
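For DRBD (internally mirrored) instances, the effect of ``--restricted-migration`` can be read off the possibleMoves changes further down in Cluster.hs. The following Python paraphrase of that case analysis is purely illustrative (hbal itself is Haskell; the function name and string labels are made up):

    def allowed_drbd_moves(restricted, primary_drained,
                           use_secondary=True, inst_moves=True):
      """Move types hbal may consider for one DRBD instance (illustration)."""
      if restricted and not primary_drained:
        # Only pure secondary replacements: no failover or migration at all.
        return ["ReplaceSecondary"]
      if use_secondary and inst_moves:
        if restricted and primary_drained:
          # Migration only happens while moving off the drained primary, so
          # plain replace-primary ("frf") moves are dropped.
          return ["ReplaceSecondary", "ReplaceAndFailover", "FailoverAndReplace"]
        return ["ReplaceSecondary", "ReplaceAndFailover",
                "ReplacePrimary", "FailoverAndReplace"]
      if inst_moves:
        return ["ReplaceSecondary", "ReplaceAndFailover"]
      return ["ReplaceSecondary"]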
......@@ -44,6 +44,7 @@ import qa_monitoring
import qa_network
import qa_node
import qa_os
import qa_performance
import qa_job
import qa_rapi
import qa_tags
......@@ -846,6 +847,43 @@ def RunMonitoringTests():
RunTest(qa_monitoring.TestInstStatusCollector)
def RunPerformanceTests():
if qa_config.TestEnabled("jobqueue-performance"):
RunTest(qa_performance.TestParallelMaxInstanceCreationPerformance)
RunTest(qa_performance.TestParallelNodeCountInstanceCreationPerformance)
instances = qa_performance.CreateAllInstances()
RunTest(qa_performance.TestParallelModify, instances)
RunTest(qa_performance.TestParallelInstanceOSOperations, instances)
RunTest(qa_performance.TestParallelInstanceQueries, instances)
qa_performance.RemoveAllInstances(instances)
RunTest(qa_performance.TestJobQueueSubmissionPerformance)
if qa_config.TestEnabled("parallel-performance"):
RunTest(qa_performance.TestParallelDRBDInstanceCreationPerformance)
RunTest(qa_performance.TestParallelPlainInstanceCreationPerformance)
if qa_config.IsTemplateSupported(constants.DT_DRBD8):
inodes = qa_config.AcquireManyNodes(2)
try:
instance = qa_instance.TestInstanceAddWithDrbdDisk(inodes)
try:
RunTest(qa_performance.TestParallelInstanceFailover, instance)
RunTest(qa_performance.TestParallelInstanceMigration, instance)
RunTest(qa_performance.TestParallelInstanceReplaceDisks, instance)
RunTest(qa_performance.TestParallelInstanceReboot, instance)
RunTest(qa_performance.TestParallelInstanceReinstall, instance)
RunTest(qa_performance.TestParallelInstanceRename, instance)
finally:
qa_instance.TestInstanceRemove(instance)
instance.Release()
finally:
qa_config.ReleaseManyNodes(inodes)
def RunQa():
"""Main QA body.
......@@ -985,6 +1023,8 @@ def RunQa():
RunTestBlock(RunMonitoringTests)
RunPerformanceTests()
RunTestIf("create-cluster", qa_node.TestNodeRemoveAll)
RunTestIf("cluster-destroy", qa_cluster.TestClusterDestroy)
......
......@@ -43,10 +43,10 @@ from qa_utils import AssertCommand, GetCommandOutput, GetObjectInfo
AVAILABLE_LOCKS = [locking.LEVEL_NODE, ]
def _GetOutputFromMaster(cmd,
# pylint: disable=W0613
# (only in later branches required)
use_multiplexer=True, log_cmd=True):
def GetOutputFromMaster(cmd,
# pylint: disable=W0613
# (only in later branches required)
use_multiplexer=True, log_cmd=True):
""" Gets the output of a command executed on master.
"""
......@@ -69,9 +69,12 @@ def ExecuteJobProducingCommand(cmd):
@param cmd: The command to execute, broken into constituent components.
"""
job_id_output = _GetOutputFromMaster(cmd)
job_id_output = GetOutputFromMaster(cmd)
possible_job_ids = re.findall("JobID: ([0-9]+)", job_id_output)
# Usually, the output contains "JobID: <job_id>", but for instance related
# commands, the output is of the form "<job_id>: <instance_name>"
possible_job_ids = re.findall("JobID: ([0-9]+)", job_id_output) or \
re.findall("([0-9]+): .+", job_id_output)
if len(possible_job_ids) != 1:
raise qa_error.Error("Cannot parse command output to find job id: output "
"is %s" % job_id_output)
......@@ -154,7 +157,7 @@ def _GetNodeUUIDMap(nodes):
"""
cmd = ["gnt-node", "list", "--no-header", "-o", "name,uuid"]
cmd.extend(nodes)
output = _GetOutputFromMaster(cmd)
output = GetOutputFromMaster(cmd)
return dict(map(lambda x: x.split(), output.splitlines()))
......@@ -204,7 +207,7 @@ def _GetBlockingLocks():
# Due to mysterious issues when a SSH multiplexer is being used by two
# threads, we turn it off, and block most of the logging to improve the
# visibility of the other thread's output
locks_output = _GetOutputFromMaster("gnt-debug locks", use_multiplexer=False,
locks_output = GetOutputFromMaster("gnt-debug locks", use_multiplexer=False,
log_cmd=False)
# The first non-empty line is the header, which we do not need
......
......@@ -24,6 +24,7 @@
"""
import copy
import datetime
import operator
import os
import random
......@@ -142,7 +143,7 @@ def _AssertRetCode(rcode, fail, cmdstr, nodename):
(cmdstr, nodename, rcode))
def AssertCommand(cmd, fail=False, node=None, log_cmd=True):
def AssertCommand(cmd, fail=False, node=None, log_cmd=True, max_seconds=None):
"""Checks that a remote command succeeds.
@param cmd: either a string (the command to execute) or a list (to
......@@ -154,6 +155,9 @@ def AssertCommand(cmd, fail=False, node=None, log_cmd=True):
dict or a string)
@param log_cmd: if False, the command won't be logged (simply passed to
StartSSH)
@type max_seconds: double
@param max_seconds: fail if the command takes more than C{max_seconds}
seconds
@return: the return code of the command
@raise qa_error.Error: if the command fails when it shouldn't or vice versa
......@@ -168,9 +172,17 @@ def AssertCommand(cmd, fail=False, node=None, log_cmd=True):
else:
cmdstr = utils.ShellQuoteArgs(cmd)
start = datetime.datetime.now()
rcode = StartSSH(nodename, cmdstr, log_cmd=log_cmd).wait()
duration_seconds = TimedeltaToTotalSeconds(datetime.datetime.now() - start)
_AssertRetCode(rcode, fail, cmdstr, nodename)
if max_seconds is not None:
if duration_seconds > max_seconds:
raise qa_error.Error(
"Cmd '%s' took %f seconds, maximum of %f was exceeded" %
(cmdstr, duration_seconds, max_seconds))
return rcode
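A usage sketch of the new max_seconds parameter; the command and the 20-second budget below are arbitrary examples, not values taken from the QA suite:

    from qa_utils import AssertCommand

    # Plain success check, as before:
    AssertCommand(["gnt-cluster", "getmaster"])

    # The same check, but additionally raise qa_error.Error if the command
    # takes longer than 20 seconds on the master node:
    AssertCommand(["gnt-cluster", "getmaster"], max_seconds=20.0)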
......@@ -880,3 +892,19 @@ def UsesIPv6Connection(host, port):
"""
return any(t[0] == socket.AF_INET6 for t in socket.getaddrinfo(host, port))
def TimedeltaToTotalSeconds(td):
"""Returns the total seconds in a C{datetime.timedelta} object.
This performs the same task as the C{datetime.timedelta.total_seconds()}
method which is present in Python 2.7 onwards.
@type td: datetime.timedelta
@param td: timedelta object to convert
@rtype: float
@return: total seconds in the timedelta object
"""
return ((td.microseconds + (td.seconds + td.days * 24.0 * 3600.0) * 10 ** 6) /
10 ** 6)
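A quick sanity check of the helper above (it is meant to be equivalent to datetime.timedelta.total_seconds() from Python 2.7, for the older interpreters the QA still supports):

    import datetime

    from qa_utils import TimedeltaToTotalSeconds

    td = datetime.timedelta(days=1, seconds=30, microseconds=500000)
    # 1 day is 86400 seconds, so the expected total is 86430.5
    assert TimedeltaToTotalSeconds(td) == 86430.5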
......@@ -52,6 +52,7 @@ module Ganeti.HTools.CLI
, oMonD
, oMonDDataFile
, oEvacMode
, oRestrictedMigrate
, oExInst
, oExTags
, oExecJobs
......@@ -132,6 +133,7 @@ data Options = Options
, optMonDFile :: Maybe FilePath -- ^ Optional file with data provided
-- ^ by MonDs
, optEvacMode :: Bool -- ^ Enable evacuation mode
, optRestrictedMigrate :: Bool -- ^ Disallow replace-primary moves
, optExInst :: [String] -- ^ Instances to be excluded
, optExTags :: Maybe [String] -- ^ Tags to use for exclusion
, optExecJobs :: Bool -- ^ Execute the commands via Luxi
......@@ -191,6 +193,7 @@ defaultOptions = Options
, optMonD = False
, optMonDFile = Nothing
, optEvacMode = False
, optRestrictedMigrate = False
, optExInst = []
, optExTags = Nothing
, optExecJobs = False
......@@ -368,6 +371,14 @@ oEvacMode =
\ instances away from offline and drained nodes",
OptComplNone)
oRestrictedMigrate :: OptType
oRestrictedMigrate =
(Option "" ["restricted-migration"]
(NoArg (\opts -> Ok opts { optRestrictedMigrate = True }))
"disallow replace-primary moves (aka frf-moves); in evacuation mode, this\
\ will ensure that the only migrations are off the drained nodes",
OptComplNone)
oExInst :: OptType
oExInst =
(Option "" ["exclude-instances"]
......
......@@ -551,27 +551,39 @@ checkSingleStep ini_tbl target cur_tbl move =
possibleMoves :: MirrorType -- ^ The mirroring type of the instance
-> Bool -- ^ Whether the secondary node is a valid new node
-> Bool -- ^ Whether we can change the primary node
-> (Bool, Bool) -- ^ Whether migration is restricted and whether
-- the instance primary is offline
-> Ndx -- ^ Target node candidate
-> [IMove] -- ^ List of valid result moves
possibleMoves MirrorNone _ _ _ = []
possibleMoves MirrorNone _ _ _ _ = []
possibleMoves MirrorExternal _ False _ = []
possibleMoves MirrorExternal _ False _ _ = []
possibleMoves MirrorExternal _ True tdx =
possibleMoves MirrorExternal _ True _ tdx =
[ FailoverToAny tdx ]
possibleMoves MirrorInternal _ False tdx =
possibleMoves MirrorInternal _ False _ tdx =
[ ReplaceSecondary tdx ]
possibleMoves MirrorInternal True True tdx =
possibleMoves MirrorInternal _ _ (True, False) tdx =
[ ReplaceSecondary tdx
]
possibleMoves MirrorInternal True True (False, _) tdx =
[ ReplaceSecondary tdx
, ReplaceAndFailover tdx
, ReplacePrimary tdx
, FailoverAndReplace tdx
]
possibleMoves MirrorInternal False True tdx =
possibleMoves MirrorInternal True True (True, True) tdx =
[ ReplaceSecondary tdx
, ReplaceAndFailover tdx
, FailoverAndReplace tdx
]
possibleMoves MirrorInternal False True _ tdx =
[ ReplaceSecondary tdx
, ReplaceAndFailover tdx
]
......@@ -580,10 +592,12 @@ possibleMoves MirrorInternal False True tdx =
checkInstanceMove :: [Ndx] -- ^ Allowed target node indices
-> Bool -- ^ Whether disk moves are allowed
-> Bool -- ^ Whether instance moves are allowed
-> Bool -- ^ Whether migration is restricted
-> Table -- ^ Original table
-> Instance.Instance -- ^ Instance to move
-> Table -- ^ Best new table for this instance
checkInstanceMove nodes_idx disk_moves inst_moves ini_tbl target =
checkInstanceMove nodes_idx disk_moves inst_moves rest_mig
ini_tbl@(Table nl _ _ _) target =
let opdx = Instance.pNode target
osdx = Instance.sNode target
bad_nodes = [opdx, osdx]
......@@ -594,9 +608,13 @@ checkInstanceMove nodes_idx disk_moves inst_moves ini_tbl target =
-- if drbd and allowed to failover
then checkSingleStep ini_tbl target ini_tbl Failover
else ini_tbl
primary_drained = Node.offline
. flip Container.find nl
$ Instance.pNode target
all_moves =
if disk_moves
then concatMap (possibleMoves mir_type use_secondary inst_moves)
then concatMap (possibleMoves mir_type use_secondary inst_moves
(rest_mig, primary_drained))
nodes
else []
in
......@@ -607,10 +625,11 @@ checkInstanceMove nodes_idx disk_moves inst_moves ini_tbl target =
checkMove :: [Ndx] -- ^ Allowed target node indices
-> Bool -- ^ Whether disk moves are allowed
-> Bool -- ^ Whether instance moves are allowed
-> Bool -- ^ Whether migration is restricted
-> Table -- ^ The current solution
-> [Instance.Instance] -- ^ List of instances still to move
-> Table -- ^ The new solution
checkMove nodes_idx disk_moves inst_moves ini_tbl victims =
checkMove nodes_idx disk_moves inst_moves rest_mig ini_tbl victims =
let Table _ _ _ ini_plc = ini_tbl
-- we're using rwhnf from the Control.Parallel.Strategies
-- package; we don't need to use rnf as that would force too
......@@ -618,7 +637,7 @@ checkMove nodes_idx disk_moves inst_moves ini_tbl victims =
-- multi-threaded case the weak head normal form is enough to
-- spark the evaluation
tables = parMap rwhnf (checkInstanceMove nodes_idx disk_moves
inst_moves ini_tbl)
inst_moves rest_mig ini_tbl)
victims
-- iterate over all instances, computing the best move
best_tbl = foldl' compareTables ini_tbl tables
......@@ -642,10 +661,11 @@ tryBalance :: Table -- ^ The starting table
-> Bool -- ^ Allow disk moves
-> Bool -- ^ Allow instance moves
-> Bool -- ^ Only evacuate moves
-> Bool -- ^ Restrict migration
-> Score -- ^ Min gain threshold
-> Score -- ^ Min gain
-> Maybe Table -- ^ The resulting table and commands
tryBalance ini_tbl disk_moves inst_moves evac_mode mg_limit min_gain =
tryBalance ini_tbl disk_moves inst_moves evac_mode rest_mig mg_limit min_gain =
let Table ini_nl ini_il ini_cv _ = ini_tbl
all_inst = Container.elems ini_il
all_nodes = Container.elems ini_nl
......@@ -658,7 +678,8 @@ tryBalance ini_tbl disk_moves inst_moves evac_mode mg_limit min_gain =
reloc_inst = filter (\i -> Instance.movable i &&
Instance.autoBalance i) all_inst'
node_idx = map Node.idx online_nodes
fin_tbl = checkMove node_idx disk_moves inst_moves ini_tbl reloc_inst
fin_tbl = checkMove node_idx disk_moves inst_moves rest_mig
ini_tbl reloc_inst
(Table _ _ fin_cv _) = fin_tbl
in
if fin_cv < ini_cv && (ini_cv > mg_limit || ini_cv - fin_cv >= min_gain)
......
......@@ -73,6 +73,7 @@ options = do
, oPrintCommands
, oDataFile
, oEvacMode
, oRestrictedMigrate
, oRapiMaster
, luxi
, oIAllocSrc
......@@ -122,6 +123,7 @@ iterateDepth :: Bool -- ^ Whether to print moves
-> Int -- ^ Remaining length
-> Bool -- ^ Allow disk moves
-> Bool -- ^ Allow instance moves
-> Bool -- ^ Restrict migration
-> Int -- ^ Max node name len
-> Int -- ^ Max instance name len
-> [MoveJob] -- ^ Current command list
......@@ -131,13 +133,13 @@ iterateDepth :: Bool -- ^ Whether to print moves
-> Bool -- ^ Enable evacuation mode
-> IO (Cluster.Table, [MoveJob]) -- ^ The resulting table
-- and commands
iterateDepth printmove ini_tbl max_rounds disk_moves inst_moves nmlen imlen
cmd_strs min_score mg_limit min_gain evac_mode =
iterateDepth printmove ini_tbl max_rounds disk_moves inst_moves rest_mig nmlen
imlen cmd_strs min_score mg_limit min_gain evac_mode =
let Cluster.Table ini_nl ini_il _ _ = ini_tbl
allowed_next = Cluster.doNextBalance ini_tbl max_rounds min_score
m_fin_tbl = if allowed_next
then Cluster.tryBalance ini_tbl disk_moves inst_moves
evac_mode mg_limit min_gain
evac_mode rest_mig mg_limit min_gain
else Nothing
in case m_fin_tbl of
Just fin_tbl ->
......@@ -154,7 +156,7 @@ iterateDepth printmove ini_tbl max_rounds disk_moves inst_moves nmlen imlen
putStrLn sol_line
hFlush stdout
iterateDepth printmove fin_tbl max_rounds disk_moves inst_moves
nmlen imlen upd_cmd_strs min_score
rest_mig nmlen imlen upd_cmd_strs min_score
mg_limit min_gain evac_mode
Nothing -> return (ini_tbl, cmd_strs)
......@@ -375,6 +377,7 @@ main opts args = do
(fin_tbl, cmd_strs) <- iterateDepth True ini_tbl (optMaxLength opts)
(optDiskMoves opts)
(optInstMoves opts)
(optRestrictedMigrate opts)
nmlen imlen [] min_cv
(optMinGainLim opts) (optMinGain opts)
(optEvacMode opts)
......
......@@ -261,6 +261,7 @@ executeSimulation opts ini_tbl min_cv gidx nl il = do
(optMaxLength opts)
(optDiskMoves opts)
(optInstMoves opts)
False
nmlen imlen [] min_cv
(optMinGainLim opts) (optMinGain opts)
(optEvacMode opts)
......
......@@ -110,7 +110,7 @@ balance :: (Node.List, Instance.List)
balance (nl, il) =
let ini_cv = Cluster.compCV nl
ini_tbl = Cluster.Table nl il ini_cv []
balanceStep tbl = Cluster.tryBalance tbl True True False 0.0 0.0
balanceStep tbl = Cluster.tryBalance tbl True True False False 0.0 0.0
bTables = map fromJust . takeWhile isJust
$ iterate (>>= balanceStep) (Just ini_tbl)
(Cluster.Table nl' il' _ _) = last bTables
......
group-01|fake-uuid-01|preferred||
node-E|4000|0|2000|6000|3000|32|Y|fake-uuid-01|4
node-1|4000|0|3000|6000|4000|32|N|fake-uuid-01|4
node-2|4000|0|3000|6000|1000|32|N|fake-uuid-01|4
node-3|4000|0|2000|6000|2000|32|N|fake-uuid-01|4
inst-p1|1000|1000|1|running|Y|node-E|node-1|drbd||1
inst-p2|1000|1000|1|running|Y|node-E|node-3|drbd||1
inst-s1|1000|1000|1|running|Y|node-2|node-E|drbd||1
inst-12|1000|1000|1|running|Y|node-1|node-2|drbd||1
inst-32a|1000|1000|1|running|Y|node-3|node-2|drbd||1
inst-32b|1000|1000|1|running|Y|node-3|node-2|drbd||1
|128,1,1024,1,1,1|128,1,1024,1,1,1;32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd,ext|4.0|32.0
group-01|128,1,1024,1,1,1|128,1,1024,1,1,1;32768,8,1048576,16,8,12|diskless,file,sharedfile,plain,blockdev,drbd,rbd,ext|4.0|32.0
......@@ -68,7 +68,7 @@ isNodeBig size node = Node.availDisk node > size * Types.unitDsk
&& Node.availCpu node > size * Types.unitCpu
canBalance :: Cluster.Table -> Bool -> Bool -> Bool -> Bool
canBalance tbl dm im evac = isJust $ Cluster.tryBalance tbl dm im evac 0 0
canBalance tbl dm im evac = isJust $ Cluster.tryBalance tbl dm im evac False 0 0
-- | Assigns a new fresh instance to a cluster; this is not
-- allocation, so no resource checks are done.
......
./test/hs/hbal -t $TESTDATA_DIR/hbal-evac.data
>>>/inst-32. node-3:node-2 => node-2:node-1.*
(.|
)*Solution length=4/
>>>= 0
./test/hs/hbal --evac-mode -t $TESTDATA_DIR/hbal-evac.data
>>>/a=f r:node-1 f
(.|
)*Solution length=3/
>>>= 0
./test/hs/hbal --evac-mode --restricted-migration -t $TESTDATA_DIR/hbal-evac.data
>>>/a=f r:node-1
(.|
)*Solution length=3/
>>>= 0
......@@ -32,6 +32,7 @@ class ParseVersionTest(unittest.TestCase):
self.assertEquals(version.ParseVersion("2.10"), (2, 10, 0))
self.assertEquals(version.ParseVersion("2.10.1"), (2, 10, 1))
self.assertEquals(version.ParseVersion("2.10.1~beta2"), (2, 10, 1))
self.assertEquals(version.ParseVersion("2.10.1-3"), (2, 10, 1))
self.assertEquals(version.ParseVersion("2"), None)
self.assertEquals(version.ParseVersion("pink bunny"), None)
......
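The unit tests above pin down how version strings with suffixes such as ``~beta2`` or ``-3`` should be treated. As a purely illustrative sketch (not Ganeti's actual version-parsing code), a parser with that behaviour could look like this:

    import re

    # "major.minor" or "major.minor.point", with any trailing suffix ignored.
    _VERSION_RE = re.compile(r"^(\d+)\.(\d+)(?:\.(\d+))?")

    def ParseVersionSketch(versionstring):
      """Return (major, minor, point) or None, matching the tests above."""
      match = _VERSION_RE.match(versionstring)
      if not match:
        return None
      major, minor, point = match.groups()
      return (int(major), int(minor), int(point or 0))

    assert ParseVersionSketch("2.10.1-3") == (2, 10, 1)
    assert ParseVersionSketch("2.10") == (2, 10, 0)
    assert ParseVersionSketch("pink bunny") is None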