Message-ID: <20231215171020.687342-11-bigeasy@linutronix.de>
Date: Fri, 15 Dec 2023 18:07:29 +0100
From: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
To: linux-kernel@...r.kernel.org,
	netdev@...r.kernel.org
Cc: "David S. Miller" <davem@...emloft.net>,
	Boqun Feng <boqun.feng@...il.com>,
	Daniel Borkmann <daniel@...earbox.net>,
	Eric Dumazet <edumazet@...gle.com>,
	Frederic Weisbecker <frederic@...nel.org>,
	Ingo Molnar <mingo@...hat.com>,
	Jakub Kicinski <kuba@...nel.org>,
	Paolo Abeni <pabeni@...hat.com>,
	Peter Zijlstra <peterz@...radead.org>,
	Thomas Gleixner <tglx@...utronix.de>,
	Waiman Long <longman@...hat.com>,
	Will Deacon <will@...nel.org>,
	Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Subject: [PATCH net-next 10/24] dev: Use nested-BH locking for softnet_data.process_queue.

softnet_data::process_queue is a per-CPU variable and relies on disabled
BH for its locking. Without per-CPU locking in local_bh_disable() on
PREEMPT_RT, this data structure requires explicit locking.
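
To make the hazard concrete, here is a minimal sketch of the pattern in
question (example_queue and example_dequeue are made-up names, not code
from this patch): a per-CPU queue whose only serialisation is the
surrounding BH-disabled section.

	/* Hypothetical example: a per-CPU queue serialised purely by
	 * disabled BH. On !PREEMPT_RT, local_bh_disable() keeps all
	 * other BH users off this CPU, so the queue is safe. Once
	 * local_bh_disable() no longer provides per-CPU locking on
	 * PREEMPT_RT, two threads may interleave on the same CPU's
	 * queue.
	 */
	static DEFINE_PER_CPU(struct sk_buff_head, example_queue);

	static struct sk_buff *example_dequeue(void)
	{
		struct sk_buff *skb;

		local_bh_disable();
		skb = __skb_dequeue(this_cpu_ptr(&example_queue));
		local_bh_enable();
		return skb;
	}
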
Add a local_lock_t to softnet_data and use local_lock_nested_bh() for locking
of process_queue. This change adds only lockdep coverage and does not
alter the functional behaviour for !PREEMPT_RT.
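
A minimal sketch of the resulting pattern (same made-up example_* names
as above): the nested-BH lock is acquired inside the already BH-disabled
section. On !PREEMPT_RT, local_lock_nested_bh() provides only the lockdep
annotation; on PREEMPT_RT it takes a real per-CPU lock.

	/* Hypothetical example mirroring the softnet_data change.
	 * The queue itself would still need skb_queue_head_init()
	 * at initialisation time.
	 */
	struct example_pcpu {
		struct sk_buff_head	queue;
		local_lock_t		bh_lock;
	};

	static DEFINE_PER_CPU(struct example_pcpu, example_pcpu) = {
		.bh_lock = INIT_LOCAL_LOCK(bh_lock),
	};

	static struct sk_buff *example_dequeue(void)
	{
		struct sk_buff *skb;

		local_bh_disable();
		local_lock_nested_bh(&example_pcpu.bh_lock);
		skb = __skb_dequeue(this_cpu_ptr(&example_pcpu.queue));
		local_unlock_nested_bh(&example_pcpu.bh_lock);
		local_bh_enable();
		return skb;
	}
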
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
---
 include/linux/netdevice.h |  1 +
 net/core/dev.c            | 17 +++++++++++++++--
 2 files changed, 16 insertions(+), 2 deletions(-)

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 06436695c3679..88e27d3c39da2 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3238,6 +3238,7 @@ static inline bool dev_has_header(const struct net_device *dev)
 struct softnet_data {
 	struct list_head	poll_list;
 	struct sk_buff_head	process_queue;
+	local_lock_t		process_queue_bh_lock;
 
 	/* stats */
 	unsigned int		processed;
diff --git a/net/core/dev.c b/net/core/dev.c
index 09232080843ee..5a0f6da7b3ae5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -440,7 +440,9 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
  * queue in the local softnet handler.
  */
 
-DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
+DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data) = {
+	.process_queue_bh_lock = INIT_LOCAL_LOCK(process_queue_bh_lock),
+};
 EXPORT_PER_CPU_SYMBOL(softnet_data);
 
 #ifdef CONFIG_LOCKDEP
@@ -5838,6 +5840,7 @@ static void flush_backlog(struct work_struct *work)
 	}
 	rps_unlock_irq_enable(sd);
 
+	local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
 			__skb_unlink(skb, &sd->process_queue);
@@ -5845,6 +5848,7 @@ static void flush_backlog(struct work_struct *work)
 			input_queue_head_incr(sd);
 		}
 	}
+	local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
 	local_bh_enable();
 }
 
@@ -5966,15 +5970,22 @@ static int process_backlog(struct napi_struct *napi, int quota)
 	while (again) {
 		struct sk_buff *skb;
 
+		local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
 		while ((skb = __skb_dequeue(&sd->process_queue))) {
+			local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
 			rcu_read_lock();
 			__netif_receive_skb(skb);
 			rcu_read_unlock();
+
+			local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
 			input_queue_head_incr(sd);
-			if (++work >= quota)
+			if (++work >= quota) {
+				local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
 				return work;
+			}
 
 		}
+		local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
 
 		rps_lock_irq_disable(sd);
 		if (skb_queue_empty(&sd->input_pkt_queue)) {
@@ -5989,8 +6000,10 @@ static int process_backlog(struct napi_struct *napi, int quota)
 			napi->state = 0;
 			again = false;
 		} else {
+			local_lock_nested_bh(&softnet_data.process_queue_bh_lock);
 			skb_queue_splice_tail_init(&sd->input_pkt_queue,
 						   &sd->process_queue);
+			local_unlock_nested_bh(&softnet_data.process_queue_bh_lock);
 		}
 		rps_unlock_irq_enable(sd);
 	}
--
2.43.0