Message-ID: <20251103165256.1712169-1-edumazet@google.com>
Date: Mon, 3 Nov 2025 16:52:56 +0000
From: Eric Dumazet <edumazet@...gle.com>
To: "David S . Miller" <davem@...emloft.net>, Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>
Cc: Simon Horman <horms@...nel.org>, Kuniyuki Iwashima <kuniyu@...gle.com>, netdev@...r.kernel.org,
eric.dumazet@...il.com, Eric Dumazet <edumazet@...gle.com>
Subject: [PATCH net-next] net: mark deliver_skb() as unlikely and not inlined
deliver_skb() should not be inlined, as it is not called
in the fast path.

Add unlikely() clauses to hint this fact to the compiler.
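
For context, unlikely() boils down to a __builtin_expect() hint that
lets the compiler move the annotated branch out of the hot path. A
simplified sketch of the pattern (the kernel's real definition lives
in include/linux/compiler.h):

	/* from include/linux/compiler.h (simplified) */
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	/* hinted call site: the cold deliver_skb() call is kept
	 * out of line, shrinking the hot loop body.
	 */
	if (unlikely(pt_prev))
		ret = deliver_skb(skb, pt_prev, orig_dev);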
Before this patch:

size net/core/dev.o
   text    data     bss     dec     hex filename
 121794   13330     176  135300   21084 net/core/dev.o

__netif_receive_skb_core() size on x86_64 : 4080 bytes.

After:

size net/core/dev.o
   text    data     bss     dec     hex filename
 120330   13338     176  133844   20ad4 net/core/dev.o

__netif_receive_skb_core() size on x86_64 : 2781 bytes.
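
For reference, one way to reproduce the per-function figures above,
using nm from binutils (the exact command is not part of the original
message):

	nm --print-size --size-sort net/core/dev.o | \
		grep __netif_receive_skb_core

The second column of the output is the symbol size, printed in
hexadecimal.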
Signed-off-by: Eric Dumazet <edumazet@...gle.com>
---
net/core/dev.c | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index dccc1176f3c6565f96a7e2b5f42d009ef6435496..6886632b57605778284bb3dabdd05dfae5df37e0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2463,9 +2463,9 @@ int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb)
 	return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb);
 }
 
-static inline int deliver_skb(struct sk_buff *skb,
-			      struct packet_type *pt_prev,
-			      struct net_device *orig_dev)
+static int deliver_skb(struct sk_buff *skb,
+		       struct packet_type *pt_prev,
+		       struct net_device *orig_dev)
 {
 	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
 		return -ENOMEM;
@@ -2484,7 +2484,7 @@ static inline void deliver_ptype_list_skb(struct sk_buff *skb,
 	list_for_each_entry_rcu(ptype, ptype_list, list) {
 		if (ptype->type != type)
 			continue;
-		if (pt_prev)
+		if (unlikely(pt_prev))
 			deliver_skb(skb, pt_prev, orig_dev);
 		pt_prev = ptype;
 	}
@@ -2545,7 +2545,7 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 		if (skb_loop_sk(ptype, skb))
 			continue;
 
-		if (pt_prev) {
+		if (unlikely(pt_prev)) {
 			deliver_skb(skb2, pt_prev, skb->dev);
 			pt_prev = ptype;
 			continue;
@@ -4421,7 +4421,7 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
 		return skb;
 
 	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
-	if (*pt_prev) {
+	if (unlikely(*pt_prev)) {
 		*ret = deliver_skb(skb, *pt_prev, orig_dev);
 		*pt_prev = NULL;
 	}
@@ -5883,7 +5883,7 @@ static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
 	if (nf_hook_ingress_active(skb)) {
 		int ingress_retval;
 
-		if (*pt_prev) {
+		if (unlikely(*pt_prev)) {
 			*ret = deliver_skb(skb, *pt_prev, orig_dev);
 			*pt_prev = NULL;
 		}
@@ -5960,13 +5960,13 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
 
 	list_for_each_entry_rcu(ptype, &dev_net_rcu(skb->dev)->ptype_all,
 				list) {
-		if (pt_prev)
+		if (unlikely(pt_prev))
 			ret = deliver_skb(skb, pt_prev, orig_dev);
 		pt_prev = ptype;
 	}
 
 	list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
-		if (pt_prev)
+		if (unlikely(pt_prev))
 			ret = deliver_skb(skb, pt_prev, orig_dev);
 		pt_prev = ptype;
 	}
@@ -5997,7 +5997,7 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
 	}
 
 	if (skb_vlan_tag_present(skb)) {
-		if (pt_prev) {
+		if (unlikely(pt_prev)) {
 			ret = deliver_skb(skb, pt_prev, orig_dev);
 			pt_prev = NULL;
 		}
@@ -6009,7 +6009,7 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
 
 	rx_handler = rcu_dereference(skb->dev->rx_handler);
 	if (rx_handler) {
-		if (pt_prev) {
+		if (unlikely(pt_prev)) {
 			ret = deliver_skb(skb, pt_prev, orig_dev);
 			pt_prev = NULL;
 		}
--
2.51.1.930.gacf6e81ea2-goog