Skip to content
Snippets Groups Projects
Commit 213076fe authored by Dimitris Aragiorgis, committed by Michael Hanselmann
Browse files

Fix locking in networks


Ensure that locks are held only if needed.

Add conflicts_check to OpNetworkAdd. This is needed if we want to
check whether node/master IPs are included in the network.

Depending on conflicts_check value, we have to hold node/instance locks
during OpNetworkAdd/OpNetworkConnect or not.

Signed-off-by: Dimitris Aragiorgis <dimara@grnet.gr>
Signed-off-by: Michael Hanselmann <hansmi@google.com>
Reviewed-by: Michael Hanselmann <hansmi@google.com>
parent 9de303af
No related branches found
No related tags found
No related merge requests found
......@@ -71,6 +71,7 @@ def AddNetwork(opts, args):
mac_prefix=opts.mac_prefix,
network_type=opts.network_type,
add_reserved_ips=_HandleReservedIPs(opts.add_reserved_ips),
conflicts_check=opts.conflicts_check,
tags=tags)
SubmitOpCode(op, opts=opts)
......@@ -293,8 +294,9 @@ def RemoveNetwork(opts, args):
commands = {
"add": (
AddNetwork, ARGS_ONE_NETWORK,
[DRY_RUN_OPT, NETWORK_OPT, GATEWAY_OPT, ADD_RESERVED_IPS_OPT, TAG_ADD_OPT,
MAC_PREFIX_OPT, NETWORK_TYPE_OPT, NETWORK6_OPT, GATEWAY6_OPT],
[DRY_RUN_OPT, NETWORK_OPT, GATEWAY_OPT, ADD_RESERVED_IPS_OPT,
MAC_PREFIX_OPT, NETWORK_TYPE_OPT, NETWORK6_OPT, GATEWAY6_OPT,
NOCONFLICTSCHECK_OPT, TAG_ADD_OPT],
"<network_name>", "Add a new IP network to the cluster"),
"list": (
ListNetworks, ARGS_MANY_NETWORKS,
......
......@@ -15591,7 +15591,6 @@ class LUTestAllocator(NoHooksLU):
return result
 
 
# Network LUs
class LUNetworkAdd(LogicalUnit):
"""Logical unit for creating networks.
 
......@@ -15609,7 +15608,15 @@ class LUNetworkAdd(LogicalUnit):
 
def ExpandNames(self):
  """Compute the locks needed for adding a new network.

  A fresh UUID is generated for the network being created. When
  conflict checking is requested (``self.op.conflicts_check``), all
  node locks are acquired in shared mode so node and master IPs can be
  checked for inclusion in the new network; otherwise no extra locks
  are needed.

  """
  self.network_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

  # NOTE(review): the flattened diff carried a stale unconditional
  # "self.needed_locks = {}" before this conditional (the removed
  # pre-change line); it is dead code and has been dropped here.
  if self.op.conflicts_check:
    # Shared locks suffice: node IP addresses are only read, never
    # modified, while checking for conflicts.
    self.share_locks[locking.LEVEL_NODE] = 1
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      }
  else:
    self.needed_locks = {}

  # Register the new network object under its freshly generated UUID.
  self.add_locks[locking.LEVEL_NETWORK] = self.network_uuid
 
def CheckPrereq(self):
......@@ -15674,21 +15681,22 @@ class LUNetworkAdd(LogicalUnit):
# Check if we need to reserve the nodes and the cluster master IP
# These may not be allocated to any instances in routed mode, as
# they wouldn't function anyway.
for node in self.cfg.GetAllNodesInfo().values():
for ip in [node.primary_ip, node.secondary_ip]:
try:
pool.Reserve(ip)
self.LogInfo("Reserved node %s's IP (%s)", node.name, ip)
if self.op.conflicts_check:
for node in self.cfg.GetAllNodesInfo().values():
for ip in [node.primary_ip, node.secondary_ip]:
try:
pool.Reserve(ip)
self.LogInfo("Reserved node %s's IP (%s)", node.name, ip)
 
except errors.AddressPoolError:
pass
except errors.AddressPoolError:
pass
 
master_ip = self.cfg.GetClusterInfo().master_ip
try:
pool.Reserve(master_ip)
self.LogInfo("Reserved cluster master IP (%s)", master_ip)
except errors.AddressPoolError:
pass
master_ip = self.cfg.GetClusterInfo().master_ip
try:
pool.Reserve(master_ip)
self.LogInfo("Reserved cluster master IP (%s)", master_ip)
except errors.AddressPoolError:
pass
 
if self.op.add_reserved_ips:
for ip in self.op.add_reserved_ips:
......@@ -15716,8 +15724,11 @@ class LUNetworkRemove(LogicalUnit):
if not self.network_uuid:
raise errors.OpPrereqError("Network %s not found" % self.op.network_name,
errors.ECODE_INVAL)
self.share_locks[locking.LEVEL_NODEGROUP] = 1
self.needed_locks = {
locking.LEVEL_NETWORK: [self.network_uuid],
locking.LEVEL_NODEGROUP: locking.ALL_SET,
}
 
def CheckPrereq(self):
......@@ -16059,11 +16070,11 @@ class LUNetworkConnect(LogicalUnit):
raise errors.OpPrereqError("Group %s does not exist" %
self.group_name, errors.ECODE_INVAL)
 
self.share_locks[locking.LEVEL_INSTANCE] = 1
self.needed_locks = {
locking.LEVEL_INSTANCE: [],
locking.LEVEL_NODEGROUP: [self.group_uuid],
}
self.share_locks[locking.LEVEL_INSTANCE] = 1
 
def DeclareLocks(self, level):
if level == locking.LEVEL_INSTANCE:
......@@ -16071,8 +16082,10 @@ class LUNetworkConnect(LogicalUnit):
 
# Lock instances optimistically, needs verification once group lock has
# been acquired
self.needed_locks[locking.LEVEL_INSTANCE] = \
self.cfg.GetNodeGroupInstances(self.group_uuid)
if self.op.conflicts_check:
self.needed_locks[locking.LEVEL_INSTANCE] = \
self.cfg.GetNodeGroupInstances(self.group_uuid)
self.needed_locks[locking.LEVEL_NETWORK] = [self.network_uuid]
 
def BuildHooksEnv(self):
ret = {
......@@ -16158,7 +16171,6 @@ class LUNetworkDisconnect(LogicalUnit):
self.group_name, errors.ECODE_INVAL)
 
self.needed_locks = {
locking.LEVEL_INSTANCE: [],
locking.LEVEL_NODEGROUP: [self.group_uuid],
}
self.share_locks[locking.LEVEL_INSTANCE] = 1
......@@ -16169,8 +16181,9 @@ class LUNetworkDisconnect(LogicalUnit):
 
# Lock instances optimistically, needs verification once group lock has
# been acquired
self.needed_locks[locking.LEVEL_INSTANCE] = \
self.cfg.GetNodeGroupInstances(self.group_uuid)
if self.op.conflicts_check:
self.needed_locks[locking.LEVEL_INSTANCE] = \
self.cfg.GetNodeGroupInstances(self.group_uuid)
 
def BuildHooksEnv(self):
ret = {
......
......@@ -2029,6 +2029,8 @@ class OpNetworkAdd(OpCode):
"MAC address prefix that overrides cluster one"),
("add_reserved_ips", None, _TMaybeAddr4List,
"Which IP addresses to reserve"),
("conflicts_check", True, ht.TBool,
"Whether to check for conflicting IP addresses"),
("tags", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "Network tags"),
]
OP_RESULT = ht.TNone
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment