Message-ID: <20251202141149.4144248-2-skorodumov.dmitry@huawei.com>
Date: Tue, 2 Dec 2025 17:11:48 +0300
From: Dmitry Skorodumov <skorodumov.dmitry@...wei.com>
To: <netdev@...r.kernel.org>, Dmitry Skorodumov
<skorodumov.dmitry@...wei.com>, Xiao Liang <shaw.leon@...il.com>, Kuniyuki
Iwashima <kuniyu@...gle.com>, Jakub Kicinski <kuba@...nel.org>, Stanislav
Fomichev <sdf@...ichev.me>, Etienne Champetier
<champetier.etienne@...il.com>, Paolo Abeni <pabeni@...hat.com>, "David S.
Miller" <davem@...emloft.net>, <linux-kernel@...r.kernel.org>
CC: Andrew Lunn <andrew+netdev@...n.ch>, Eric Dumazet <edumazet@...gle.com>
Subject: [PATCH net 1/2] ipvlan: Make the addrs_lock be per port

Make the addrs_lock per port rather than per ipvlan dev.

In practice this is a very minor problem, since it is unlikely that
ipvlan_add_addr() runs on two CPUs simultaneously. Nevertheless, the
per-device lock may cause:

1. A false negative from ipvlan_addr_busy(): one interface iterates
   over all port->ipvlans and their ipvlan->addrs lists under its own
   spinlock while another interface adds an IP address under a
   different lock. This is only possible for IPv6, since only
   ipvlan_addr6_event() appears to be callable without rtnl_lock held.
2. A race on the port hash table, because ipvlan_ht_addr_add(port) is
   called under different ipvlan->addrs_lock locks.

A userspace sketch of the check-then-insert interleaving in point 1 is
included after the "---" separator below.

This does not affect performance: adding or removing an IP address is
rare, and the spinlock is not taken on any fast path.

Fixes: 8230819494b3 ("ipvlan: use per device spinlock to protect addrs list updates")
Signed-off-by: Dmitry Skorodumov <skorodumov.dmitry@...wei.com>
CC: Paolo Abeni <pabeni@...hat.com>
---
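Note to reviewers: below is a minimal userspace sketch (pthreads, with
illustrative names only, not the driver's real structures or API) of the
check-then-insert race in point 1 of the commit message. Two "devices"
share one per-port address table, but each thread takes only its own
per-device lock around the busy-check and the insert, so both can see
the address as free and both can add it. A single per-port lock around
check + insert closes that window.

/*
 * Sketch of the per-device-lock race; names are illustrative only.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

#define MAX_ADDRS 16

struct port_sketch {
	const char *addrs[MAX_ADDRS];	/* shared per-port address table */
	int naddrs;
};

struct dev_sketch {
	struct port_sketch *port;
	pthread_mutex_t addrs_lock;	/* per-device lock: too narrow */
};

static bool addr_busy(struct port_sketch *port, const char *ip)
{
	for (int i = 0; i < port->naddrs; i++)
		if (!strcmp(port->addrs[i], ip))
			return true;
	return false;
}

static void *add_addr(void *arg)
{
	struct dev_sketch *dev = arg;
	const char *ip = "2001:db8::1";

	pthread_mutex_lock(&dev->addrs_lock);	/* serializes only this dev */
	if (!addr_busy(dev->port, ip))		/* both threads may pass... */
		dev->port->addrs[dev->port->naddrs++] = ip; /* ...duplicate */
	pthread_mutex_unlock(&dev->addrs_lock);
	return NULL;
}

int main(void)
{
	struct port_sketch port = { .naddrs = 0 };
	struct dev_sketch d1 = { .port = &port }, d2 = { .port = &port };
	pthread_t t1, t2;

	pthread_mutex_init(&d1.addrs_lock, NULL);
	pthread_mutex_init(&d2.addrs_lock, NULL);

	pthread_create(&t1, NULL, add_addr, &d1);
	pthread_create(&t2, NULL, add_addr, &d2);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	/* With one lock shared by both devices this is always 1. */
	printf("addresses on port: %d\n", port.naddrs);
	return 0;
}

Build with e.g. "cc -pthread sketch.c". Most runs print 1, but nothing
prevents it from printing 2; that duplicate is what moving addrs_lock
into struct ipvl_port rules out.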
drivers/net/ipvlan/ipvlan.h | 2 +-
drivers/net/ipvlan/ipvlan_main.c | 20 ++++++++++----------
2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 50de3ee204db..80f84fc87008 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -69,7 +69,6 @@ struct ipvl_dev {
DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE);
netdev_features_t sfeatures;
u32 msg_enable;
- spinlock_t addrs_lock;
};
struct ipvl_addr {
@@ -90,6 +89,7 @@ struct ipvl_port {
struct net_device *dev;
possible_net_t pnet;
struct hlist_head hlhead[IPVLAN_HASH_SIZE];
+ spinlock_t addrs_lock; /* guards hash-table and addrs */
struct list_head ipvlans;
u16 mode;
u16 flags;
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 660f3db11766..c390f4241621 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -75,6 +75,7 @@ static int ipvlan_port_create(struct net_device *dev)
for (idx = 0; idx < IPVLAN_HASH_SIZE; idx++)
INIT_HLIST_HEAD(&port->hlhead[idx]);
+ spin_lock_init(&port->addrs_lock);
skb_queue_head_init(&port->backlog);
INIT_WORK(&port->wq, ipvlan_process_multicast);
ida_init(&port->ida);
@@ -579,7 +580,6 @@ int ipvlan_link_new(struct net_device *dev, struct rtnl_newlink_params *params,
if (!tb[IFLA_MTU])
ipvlan_adjust_mtu(ipvlan, phy_dev);
INIT_LIST_HEAD(&ipvlan->addrs);
- spin_lock_init(&ipvlan->addrs_lock);
/* TODO Probably put random address here to be presented to the
* world but keep using the physical-dev address for the outgoing
@@ -657,13 +657,13 @@ void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct ipvl_addr *addr, *next;
- spin_lock_bh(&ipvlan->addrs_lock);
+ spin_lock_bh(&ipvlan->port->addrs_lock);
list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
ipvlan_ht_addr_del(addr);
list_del_rcu(&addr->anode);
kfree_rcu(addr, rcu);
}
- spin_unlock_bh(&ipvlan->addrs_lock);
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
ida_free(&ipvlan->port->ida, dev->dev_id);
list_del_rcu(&ipvlan->pnode);
@@ -847,16 +847,16 @@ static void ipvlan_del_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
{
struct ipvl_addr *addr;
- spin_lock_bh(&ipvlan->addrs_lock);
+ spin_lock_bh(&ipvlan->port->addrs_lock);
addr = ipvlan_find_addr(ipvlan, iaddr, is_v6);
if (!addr) {
- spin_unlock_bh(&ipvlan->addrs_lock);
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
return;
}
ipvlan_ht_addr_del(addr);
list_del_rcu(&addr->anode);
- spin_unlock_bh(&ipvlan->addrs_lock);
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
kfree_rcu(addr, rcu);
}
@@ -878,14 +878,14 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
{
int ret = -EINVAL;
- spin_lock_bh(&ipvlan->addrs_lock);
+ spin_lock_bh(&ipvlan->port->addrs_lock);
if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true))
netif_err(ipvlan, ifup, ipvlan->dev,
"Failed to add IPv6=%pI6c addr for %s intf\n",
ip6_addr, ipvlan->dev->name);
else
ret = ipvlan_add_addr(ipvlan, ip6_addr, true);
- spin_unlock_bh(&ipvlan->addrs_lock);
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
return ret;
}
@@ -946,14 +946,14 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
{
int ret = -EINVAL;
- spin_lock_bh(&ipvlan->addrs_lock);
+ spin_lock_bh(&ipvlan->port->addrs_lock);
if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false))
netif_err(ipvlan, ifup, ipvlan->dev,
"Failed to add IPv4=%pI4 on %s intf.\n",
ip4_addr, ipvlan->dev->name);
else
ret = ipvlan_add_addr(ipvlan, ip4_addr, false);
- spin_unlock_bh(&ipvlan->addrs_lock);
+ spin_unlock_bh(&ipvlan->port->addrs_lock);
return ret;
}
--
2.25.1