Message-ID: <20250926151304.1897276-2-edumazet@google.com>
Date: Fri, 26 Sep 2025 15:13:02 +0000
From: Eric Dumazet <edumazet@...gle.com>
To: "David S . Miller" <davem@...emloft.net>, Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>
Cc: Simon Horman <horms@...nel.org>, Kuniyuki Iwashima <kuniyu@...gle.com>,
Willem de Bruijn <willemb@...gle.com>, netdev@...r.kernel.org, eric.dumazet@...il.com,
Eric Dumazet <edumazet@...gle.com>
Subject: [PATCH net-next 1/3] net: make softnet_data.defer_count an atomic

This is preparation work to remove the softnet_data.defer_lock, as it
is contended on hosts with a large number of cores.

Signed-off-by: Eric Dumazet <edumazet@...gle.com>
---
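Note for reviewers (illustration only, not part of the patch): a minimal
userspace sketch of the counter conversion, using C11 <stdatomic.h> as a
stand-in for the kernel atomic_t API. atomic_fetch_add() returns the
pre-increment value, which is the same value the patch recovers with
atomic_inc_return() - 1 (atomic_inc_return() returns the new value), so
the old "kick at half capacity" test is preserved without the lock:

/* Sketch only: C11 stdatomic stand-in for the kernel's atomic_t.
 * Demonstrates that one atomic RMW preserves the semantics of the
 * old locked sequence:
 *	kick = sd->defer_count == (defer_max >> 1);
 *	WRITE_ONCE(sd->defer_count, sd->defer_count + 1);
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int defer_count;

static bool defer_one(int defer_max)
{
	/* atomic_fetch_add() returns the pre-increment value, like
	 * atomic_inc_return(...) - 1 in the patch.
	 */
	return atomic_fetch_add(&defer_count, 1) == (defer_max >> 1);
}

int main(void)
{
	int defer_max = 64, i;

	for (i = 0; i < defer_max; i++)
		if (defer_one(defer_max))
			printf("kick IPI at skb %d\n", i);
	return 0;
}

In this patch the defer_list manipulation still runs under defer_lock;
only the counter update no longer depends on it.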
 include/linux/netdevice.h | 2 +-
 net/core/dev.c            | 2 +-
 net/core/skbuff.c         | 6 ++----
 3 files changed, 4 insertions(+), 6 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 1b85454116f666ced61a1450d3f899940f499c05..27e3fa69253f694b98d32b6138cf491da5a8b824 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3538,7 +3538,7 @@ struct softnet_data {
 
 	/* Another possibly contended cache line */
 	spinlock_t		defer_lock ____cacheline_aligned_in_smp;
-	int			defer_count;
+	atomic_t		defer_count;
 	int			defer_ipi_scheduled;
 	struct sk_buff		*defer_list;
 	call_single_data_t	defer_csd;
diff --git a/net/core/dev.c b/net/core/dev.c
index 8b54fdf0289ab223fc37d27a078536db37646b55..8566678d83444e8aacbfea4842878279cf28516f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6726,7 +6726,7 @@ static void skb_defer_free_flush(struct softnet_data *sd)
 	spin_lock(&sd->defer_lock);
 	skb = sd->defer_list;
 	sd->defer_list = NULL;
-	sd->defer_count = 0;
+	atomic_set(&sd->defer_count, 0);
 	spin_unlock(&sd->defer_lock);
 
 	while (skb != NULL) {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index daaf6da43cc9e199389c3afcd6621c177d247884..f91571f51c69ecf8c2fffed5f3a3cd33fd95828b 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -7201,14 +7201,12 @@ nodefer:	kfree_skb_napi_cache(skb);
 
 	sd = &per_cpu(softnet_data, cpu);
 	defer_max = READ_ONCE(net_hotdata.sysctl_skb_defer_max);
-	if (READ_ONCE(sd->defer_count) >= defer_max)
+	if (atomic_read(&sd->defer_count) >= defer_max)
 		goto nodefer;
 
 	spin_lock_bh(&sd->defer_lock);
 	/* Send an IPI every time queue reaches half capacity. */
-	kick = sd->defer_count == (defer_max >> 1);
-	/* Paired with the READ_ONCE() few lines above */
-	WRITE_ONCE(sd->defer_count, sd->defer_count + 1);
+	kick = (atomic_inc_return(&sd->defer_count) - 1) == (defer_max >> 1);
 	skb->next = sd->defer_list;
 	/* Paired with READ_ONCE() in skb_defer_free_flush() */
 	WRITE_ONCE(sd->defer_list, skb);
--
2.51.0.536.g15c5d4f767-goog