Message-ID: <20251024090517.3289181-1-edumazet@google.com>
Date: Fri, 24 Oct 2025 09:05:17 +0000
From: Eric Dumazet <edumazet@...gle.com>
To: "David S . Miller" <davem@...emloft.net>, Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>
Cc: Simon Horman <horms@...nel.org>, Kuniyuki Iwashima <kuniyu@...gle.com>, netdev@...r.kernel.org,
eric.dumazet@...il.com, Eric Dumazet <edumazet@...gle.com>,
Willem de Bruijn <willemb@...gle.com>
Subject: [PATCH net-next] net: optimize enqueue_to_backlog() for the fast path

Add likely() and unlikely() clauses for the common cases:

- Device is running.
- Queue is not full.
- Queue is less than half capacity.
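
For context (not part of the patch): likely()/unlikely() are the kernel's
branch-prediction hints; they essentially expand to __builtin_expect(), so
the compiler lays out the expected branch as the straight-line, fall-through
path. A minimal standalone sketch of the annotation pattern follows;
toy_enqueue() and the numbers are made up for illustration, compilable
with gcc or clang:

#include <stdio.h>

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* Hypothetical helper, only to show where the hints go. */
static int toy_enqueue(int running, unsigned int qlen, unsigned int max_backlog)
{
	if (unlikely(!running))			/* almost never taken */
		return -1;
	if (likely(qlen < (max_backlog >> 1)))	/* hot, uncongested path */
		return 0;
	return 1;				/* congested: extra work happens here */
}

int main(void)
{
	printf("%d\n", toy_enqueue(1, 10, 1000));
	return 0;
}
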
Add a max_backlog parameter to skb_flow_limit() to avoid
a second READ_ONCE(net_hotdata.max_backlog).

skb_flow_limit() does not need backlog_lock protection and can be
called before we acquire the lock, for even better resistance to
attacks.
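
Assembled from the hunks below (enqueue and error handling elided), the
resulting fast path looks roughly like this; both drop decisions are now
taken from lockless reads, before backlog_lock is acquired:

	reason = SKB_DROP_REASON_DEV_READY;
	if (unlikely(!netif_running(skb->dev)))		/* device is almost always up */
		goto bad_dev;

	sd = &per_cpu(softnet_data, cpu);
	qlen = skb_queue_len_lockless(&sd->input_pkt_queue);
	max_backlog = READ_ONCE(net_hotdata.max_backlog);	/* read once, reused below */

	/* Queue-full and flow-limit drops are decided without backlog_lock,
	 * so a flooding source no longer contends on the lock.
	 */
	if (unlikely(qlen > max_backlog) ||
	    skb_flow_limit(skb, qlen, max_backlog))
		goto cpu_backlog_drop;

	backlog_lock_irq_save(sd, &flags);
	qlen = skb_queue_len(&sd->input_pkt_queue);
	if (likely(qlen <= max_backlog)) {
		/* ... enqueue skb (kicking NAPI if the queue was empty),
		 * unlock and return success ...
		 */
	}
	backlog_unlock_irq_restore(sd, &flags);	/* fall through to the drop path */
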
Signed-off-by: Eric Dumazet <edumazet@...gle.com>
Cc: Willem de Bruijn <willemb@...gle.com>
---
 net/core/dev.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/net/core/dev.c b/net/core/dev.c
index 378c2d010faf251ffd874ebf0cc3dd6968eee447..d32f0b0c03bbd069d3651f5a6b772c8029baf96c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5249,14 +5249,15 @@ void kick_defer_list_purge(unsigned int cpu)
 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
 #endif
 
-static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
+static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen,
+			   int max_backlog)
 {
 #ifdef CONFIG_NET_FLOW_LIMIT
-	struct sd_flow_limit *fl;
-	struct softnet_data *sd;
 	unsigned int old_flow, new_flow;
+	const struct softnet_data *sd;
+	struct sd_flow_limit *fl;
 
-	if (qlen < (READ_ONCE(net_hotdata.max_backlog) >> 1))
+	if (likely(qlen < (max_backlog >> 1)))
 		return false;
 
 	sd = this_cpu_ptr(&softnet_data);
@@ -5301,19 +5302,19 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 	u32 tail;
 
 	reason = SKB_DROP_REASON_DEV_READY;
-	if (!netif_running(skb->dev))
+	if (unlikely(!netif_running(skb->dev)))
 		goto bad_dev;
 
-	reason = SKB_DROP_REASON_CPU_BACKLOG;
 	sd = &per_cpu(softnet_data, cpu);
 	qlen = skb_queue_len_lockless(&sd->input_pkt_queue);
 	max_backlog = READ_ONCE(net_hotdata.max_backlog);
-	if (unlikely(qlen > max_backlog))
+	if (unlikely(qlen > max_backlog) ||
+	    skb_flow_limit(skb, qlen, max_backlog))
 		goto cpu_backlog_drop;
 
 	backlog_lock_irq_save(sd, &flags);
 	qlen = skb_queue_len(&sd->input_pkt_queue);
-	if (qlen <= max_backlog && !skb_flow_limit(skb, qlen)) {
+	if (likely(qlen <= max_backlog)) {
 		if (!qlen) {
 			/* Schedule NAPI for backlog device. We can use
 			 * non atomic operation as we own the queue lock.
@@ -5334,6 +5335,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 	backlog_unlock_irq_restore(sd, &flags);
 
 cpu_backlog_drop:
+	reason = SKB_DROP_REASON_CPU_BACKLOG;
 	numa_drop_add(&sd->drop_counters, 1);
 bad_dev:
 	dev_core_stats_rx_dropped_inc(skb->dev);
--
2.51.1.821.gb6fe4d2222-goog