Commit 213076fe authored by Dimitris Aragiorgis, committed by Michael Hanselmann

Fix locking in networks



Ensure that locks are held only when needed.

Add conflicts_check to OpNetworkAdd. This is needed to check whether
node and master IPs are included in the network.

Depending on the value of conflicts_check, node/instance locks may or
may not need to be held during OpNetworkAdd/OpNetworkConnect.
Signed-off-by: Dimitris Aragiorgis <dimara@grnet.gr>
Signed-off-by: Michael Hanselmann <hansmi@google.com>
Reviewed-by: Michael Hanselmann <hansmi@google.com>
parent 9de303af
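At a glance, the change makes lock acquisition conditional on conflicts_check. The following is a minimal, framework-free sketch of that decision, not the Ganeti API: ALL_SET and the dictionary shapes merely mimic the real locking code shown in the diff below.

# Simplified model of the lock declaration this commit adds to
# LUNetworkAdd.ExpandNames; ALL_SET stands in for locking.ALL_SET.
ALL_SET = object()  # sentinel: "every name at this locking level"

def network_add_locks(conflicts_check):
    """Return (needed_locks, share_locks) for an OpNetworkAdd-style job."""
    if conflicts_check:
        # Node IPs must be inspected, so all node locks are needed; shared
        # mode (1) is enough because the node objects are only read.
        return {"node": ALL_SET}, {"node": 1}
    # Without conflict checking no node data is touched, so no node locks.
    return {}, {}

The same pattern applies to LUNetworkConnect/Disconnect, which only lock the group's instances when conflicts_check is set.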
@@ -71,6 +71,7 @@ def AddNetwork(opts, args):
                             mac_prefix=opts.mac_prefix,
                             network_type=opts.network_type,
                             add_reserved_ips=_HandleReservedIPs(opts.add_reserved_ips),
+                            conflicts_check=opts.conflicts_check,
                             tags=tags)
   SubmitOpCode(op, opts=opts)
@@ -293,8 +294,9 @@ def RemoveNetwork(opts, args):
 commands = {
   "add": (
     AddNetwork, ARGS_ONE_NETWORK,
-    [DRY_RUN_OPT, NETWORK_OPT, GATEWAY_OPT, ADD_RESERVED_IPS_OPT, TAG_ADD_OPT,
-     MAC_PREFIX_OPT, NETWORK_TYPE_OPT, NETWORK6_OPT, GATEWAY6_OPT],
+    [DRY_RUN_OPT, NETWORK_OPT, GATEWAY_OPT, ADD_RESERVED_IPS_OPT,
+     MAC_PREFIX_OPT, NETWORK_TYPE_OPT, NETWORK6_OPT, GATEWAY6_OPT,
+     NOCONFLICTSCHECK_OPT, TAG_ADD_OPT],
     "<network_name>", "Add a new IP network to the cluster"),
   "list": (
     ListNetworks, ARGS_MANY_NETWORKS,

@@ -15591,7 +15591,6 @@ class LUTestAllocator(NoHooksLU):
     return result
 
 
-# Network LUs
 class LUNetworkAdd(LogicalUnit):
   """Logical unit for creating networks.
 
@@ -15609,7 +15608,15 @@ class LUNetworkAdd(LogicalUnit):
 
   def ExpandNames(self):
     self.network_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
-    self.needed_locks = {}
+
+    if self.op.conflicts_check:
+      self.share_locks[locking.LEVEL_NODE] = 1
+      self.needed_locks = {
+        locking.LEVEL_NODE: locking.ALL_SET,
+        }
+    else:
+      self.needed_locks = {}
+
     self.add_locks[locking.LEVEL_NETWORK] = self.network_uuid
 
   def CheckPrereq(self):
@@ -15674,21 +15681,22 @@ class LUNetworkAdd(LogicalUnit):
     # Check if we need to reserve the nodes and the cluster master IP
     # These may not be allocated to any instances in routed mode, as
     # they wouldn't function anyway.
-    for node in self.cfg.GetAllNodesInfo().values():
-      for ip in [node.primary_ip, node.secondary_ip]:
-        try:
-          pool.Reserve(ip)
-          self.LogInfo("Reserved node %s's IP (%s)", node.name, ip)
-
-        except errors.AddressPoolError:
-          pass
-
-    master_ip = self.cfg.GetClusterInfo().master_ip
-    try:
-      pool.Reserve(master_ip)
-      self.LogInfo("Reserved cluster master IP (%s)", master_ip)
-    except errors.AddressPoolError:
-      pass
+    if self.op.conflicts_check:
+      for node in self.cfg.GetAllNodesInfo().values():
+        for ip in [node.primary_ip, node.secondary_ip]:
+          try:
+            pool.Reserve(ip)
+            self.LogInfo("Reserved node %s's IP (%s)", node.name, ip)
+
+          except errors.AddressPoolError:
+            pass
+
+      master_ip = self.cfg.GetClusterInfo().master_ip
+      try:
+        pool.Reserve(master_ip)
+        self.LogInfo("Reserved cluster master IP (%s)", master_ip)
+      except errors.AddressPoolError:
+        pass
 
     if self.op.add_reserved_ips:
       for ip in self.op.add_reserved_ips:
@@ -15716,8 +15724,11 @@ class LUNetworkRemove(LogicalUnit):
     if not self.network_uuid:
       raise errors.OpPrereqError("Network %s not found" % self.op.network_name,
                                  errors.ECODE_INVAL)
+
+    self.share_locks[locking.LEVEL_NODEGROUP] = 1
     self.needed_locks = {
       locking.LEVEL_NETWORK: [self.network_uuid],
+      locking.LEVEL_NODEGROUP: locking.ALL_SET,
       }
 
   def CheckPrereq(self):
@@ -16059,11 +16070,11 @@ class LUNetworkConnect(LogicalUnit):
       raise errors.OpPrereqError("Group %s does not exist" %
                                  self.group_name, errors.ECODE_INVAL)
 
+    self.share_locks[locking.LEVEL_INSTANCE] = 1
     self.needed_locks = {
       locking.LEVEL_INSTANCE: [],
       locking.LEVEL_NODEGROUP: [self.group_uuid],
       }
-    self.share_locks[locking.LEVEL_INSTANCE] = 1
 
   def DeclareLocks(self, level):
     if level == locking.LEVEL_INSTANCE:
@@ -16071,8 +16082,10 @@ class LUNetworkConnect(LogicalUnit):
 
       # Lock instances optimistically, needs verification once group lock has
      # been acquired
-      self.needed_locks[locking.LEVEL_INSTANCE] = \
-          self.cfg.GetNodeGroupInstances(self.group_uuid)
+      if self.op.conflicts_check:
+        self.needed_locks[locking.LEVEL_INSTANCE] = \
+            self.cfg.GetNodeGroupInstances(self.group_uuid)
+      self.needed_locks[locking.LEVEL_NETWORK] = [self.network_uuid]
 
   def BuildHooksEnv(self):
     ret = {
@@ -16158,7 +16171,6 @@ class LUNetworkDisconnect(LogicalUnit):
                                  self.group_name, errors.ECODE_INVAL)
 
     self.needed_locks = {
-      locking.LEVEL_INSTANCE: [],
       locking.LEVEL_NODEGROUP: [self.group_uuid],
       }
     self.share_locks[locking.LEVEL_INSTANCE] = 1
@@ -16169,8 +16181,9 @@ class LUNetworkDisconnect(LogicalUnit):
 
      # Lock instances optimistically, needs verification once group lock has
      # been acquired
-      self.needed_locks[locking.LEVEL_INSTANCE] = \
-          self.cfg.GetNodeGroupInstances(self.group_uuid)
+      if self.op.conflicts_check:
+        self.needed_locks[locking.LEVEL_INSTANCE] = \
+            self.cfg.GetNodeGroupInstances(self.group_uuid)
 
   def BuildHooksEnv(self):
     ret = {

@@ -2029,6 +2029,8 @@ class OpNetworkAdd(OpCode):
      "MAC address prefix that overrides cluster one"),
     ("add_reserved_ips", None, _TMaybeAddr4List,
      "Which IP addresses to reserve"),
+    ("conflicts_check", True, ht.TBool,
+     "Whether to check for conflicting IP addresses"),
     ("tags", ht.EmptyList, ht.TListOf(ht.TNonEmptyString), "Network tags"),
     ]
   OP_RESULT = ht.TNone
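The new OP_PARAMS entry follows the usual (name, default, check, doc) tuple form; with a default of True, clients that omit the field keep the previous conflict-checking behaviour. A hypothetical client-side construction is sketched below; field names other than conflicts_check are assumed to match this Ganeti version, and the values are made up.

# Hypothetical example only: builds the opcode, does not submit it.
from ganeti import opcodes

op = opcodes.OpNetworkAdd(network_name="example-net",    # assumed field name
                          network="192.0.2.0/24",        # assumed field name
                          conflicts_check=False,  # skip node/master IP reservation
                          tags=[])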