Message-ID: <20260202082200.650516-1-edumazet@google.com>
Date: Mon, 2 Feb 2026 08:22:00 +0000
From: Eric Dumazet <edumazet@...gle.com>
To: "David S . Miller" <davem@...emloft.net>, Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>
Cc: Simon Horman <horms@...nel.org>, Willem de Bruijn <willemb@...gle.com>, netdev@...r.kernel.org,
eric.dumazet@...il.com, Eric Dumazet <edumazet@...gle.com>
Subject: [PATCH net-next] net: add vlan_get_protocol_offset_inline() helper
skb_protocol() is bloated and forces slow stack canaries in many
fast paths.

Add vlan_get_protocol_offset_inline(), which handles the common
non-vlan cases inline.

__vlan_get_protocol_offset() is now out of line. It returns a
struct type_depth to avoid stack canaries in callers:

struct type_depth {
	__be16 type;
	u16 depth;
};
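
For illustration only, not part of the patch: a minimal userspace C
sketch of why the by-value struct return helps, assuming x86-64
SysV ABI and gcc -fstack-protector-strong. A 4-byte aggregate
returned by value comes back in a register, so the caller needs no
addressable stack slot; an int *depth out-parameter forces the
caller to take the address of a local, which is exactly the kind of
reference -fstack-protector-strong instruments with a canary.

/* Illustrative only: plain userspace C, not kernel code. */
#include <stdint.h>
#include <stdio.h>

struct type_depth {
	uint16_t type;		/* stand-in for __be16 */
	uint16_t depth;
};

static struct type_depth parse_by_value(uint16_t proto)
{
	return (struct type_depth) { .type = proto, .depth = 14 };
}

static uint16_t parse_by_pointer(uint16_t proto, int *depth)
{
	*depth = 14;
	return proto;
}

/* The 4-byte struct comes back in a register: no addressable local
 * here, so this caller is not expected to get a canary.
 */
static unsigned int caller_by_value(void)
{
	struct type_depth td = parse_by_value(0x0800);

	return td.type + td.depth;
}

/* Taking &depth is a reference to a local frame address, which is
 * what makes -fstack-protector-strong add a canary to this caller.
 */
static unsigned int caller_by_pointer(void)
{
	int depth;
	unsigned int type = parse_by_pointer(0x0800, &depth);

	return type + depth;
}

int main(void)
{
	printf("%u %u\n", caller_by_value(), caller_by_pointer());
	return 0;
}

Building this with gcc -O2 -fstack-protector-strong and inspecting
the disassembly should show the __stack_chk_fail path only in
caller_by_pointer().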
$ scripts/bloat-o-meter -t vmlinux.0 vmlinux.1
add/remove: 0/2 grow/shrink: 0/22 up/down: 0/-6320 (-6320)
Function                                     old     new   delta
vlan_get_protocol_dgram                       61      59      -2
__pfx_skb_protocol                            16       -     -16
__vlan_get_protocol_offset                   307     273     -34
tap_get_user                                1374    1207    -167
ip_md_tunnel_xmit                           1625    1452    -173
tap_sendmsg                                  940     753    -187
netif_skb_features                          1079     866    -213
netem_enqueue                               3017    2800    -217
vlan_parse_protocol                          271      50    -221
tso_start                                    567     344    -223
fq_dequeue                                  1908    1685    -223
skb_network_protocol                         434     205    -229
ip6_tnl_xmit                                2639    2409    -230
br_dev_queue_push_xmit                       474     236    -238
skb_protocol                                 258       -    -258
packet_parse_headers                         621     357    -264
__ip6_tnl_rcv                               1306    1039    -267
skb_csum_hwoffload_help                      515     224    -291
ip_tunnel_xmit                              2635    2339    -296
sch_frag_xmit_hook                          1582    1233    -349
bpf_skb_ecn_set_ce                           868     457    -411
IP6_ECN_decapsulate                         1297     768    -529
ip_tunnel_rcv                               2121    1489    -632
ipip6_rcv                                   2572    1922    -650
Total: Before=24892803, After=24886483, chg -0.03%
Signed-off-by: Eric Dumazet <edumazet@...gle.com>
---
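
Illustration only, not intended to be part of the commit: a hedged
sketch of how a caller would use the new helper, modeled on the
af_packet.c hunk below; the function name is hypothetical. The
common non-vlan case is resolved inline from skb->protocol, and only
vlan-tagged frames take the out-of-line __vlan_get_protocol_offset()
path.

#include <linux/if_vlan.h>

/* Hypothetical caller, for illustration only. */
static __be16 example_get_protocol_dgram(const struct sk_buff *skb)
{
	/* The fast path stays inline for !eth_type_vlan(skb->protocol);
	 * vlan parsing goes through the out-of-line
	 * __vlan_get_protocol_offset().
	 */
	return vlan_get_protocol_offset_inline(skb, skb->protocol,
					       skb_mac_offset(skb), NULL);
}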
 include/linux/if_vlan.h | 51 +++++++++++++++++------------------------
 net/core/skbuff.c       | 36 +++++++++++++++++++++++++++++
 net/packet/af_packet.c  |  5 ++--
 3 files changed, 60 insertions(+), 32 deletions(-)

diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index f7f34eb15e068785e464303b60f2f050b3bec0ec..f5e70e0a38256c23ef731ff8d7d6cc1449db42e3 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -594,8 +594,17 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
 	}
 }
 
+struct type_depth {
+	__be16 type;
+	u16 depth;
+};
+
+struct type_depth __vlan_get_protocol_offset(const struct sk_buff *skb,
+					     __be16 type,
+					     int mac_offset);
+
 /**
- * __vlan_get_protocol_offset() - get protocol EtherType.
+ * vlan_get_protocol_offset_inline() - get protocol EtherType.
  * @skb: skbuff to query
  * @type: first vlan protocol
  * @mac_offset: MAC offset
@@ -604,40 +613,22 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
  * Returns: the EtherType of the packet, regardless of whether it is
  * vlan encapsulated (normal or hardware accelerated) or not.
  */
-static inline __be16 __vlan_get_protocol_offset(const struct sk_buff *skb,
-						__be16 type,
-						int mac_offset,
-						int *depth)
+static inline
+__be16 vlan_get_protocol_offset_inline(const struct sk_buff *skb,
+				       __be16 type,
+				       int mac_offset,
+				       int *depth)
 {
-	unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH;
-
-	/* if type is 802.1Q/AD then the header should already be
-	 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
-	 * ETH_HLEN otherwise
-	 */
 	if (eth_type_vlan(type)) {
-		if (vlan_depth) {
-			if (WARN_ON(vlan_depth < VLAN_HLEN))
-				return 0;
-			vlan_depth -= VLAN_HLEN;
-		} else {
-			vlan_depth = ETH_HLEN;
-		}
-		do {
-			struct vlan_hdr vhdr, *vh;
-
-			vh = skb_header_pointer(skb, mac_offset + vlan_depth,
-						sizeof(vhdr), &vhdr);
-			if (unlikely(!vh || !--parse_depth))
-				return 0;
+		struct type_depth res = __vlan_get_protocol_offset(skb, type, mac_offset);
 
-			type = vh->h_vlan_encapsulated_proto;
-			vlan_depth += VLAN_HLEN;
-		} while (eth_type_vlan(type));
+		if (depth && res.type)
+			*depth = res.depth;
+		return res.type;
 	}
 
 	if (depth)
-		*depth = vlan_depth;
+		*depth = skb->mac_len;
 
 	return type;
 }
@@ -645,7 +636,7 @@ static inline __be16 __vlan_get_protocol_offset(const struct sk_buff *skb,
 static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
					 int *depth)
 {
-	return __vlan_get_protocol_offset(skb, type, 0, depth);
+	return vlan_get_protocol_offset_inline(skb, type, 0, depth);
 }
 
 /**
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4d3920e5b141a02117186c3095f15f4f7a35b1df..35984a54b36fdf0b1b2590993ba08cdb403216e7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -7440,3 +7440,39 @@ void __put_netmem(netmem_ref netmem)
 	net_devmem_put_net_iov(netmem_to_net_iov(netmem));
 }
 EXPORT_SYMBOL(__put_netmem);
+
+struct type_depth __vlan_get_protocol_offset(const struct sk_buff *skb,
+					     __be16 type,
+					     int mac_offset)
+{
+	unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH;
+
+	/* if type is 802.1Q/AD then the header should already be
+	 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
+	 * ETH_HLEN otherwise
+	 */
+	if (vlan_depth) {
+		if (WARN_ON_ONCE(vlan_depth < VLAN_HLEN))
+			return (struct type_depth) { 0 };
+		vlan_depth -= VLAN_HLEN;
+	} else {
+		vlan_depth = ETH_HLEN;
+	}
+	do {
+		struct vlan_hdr vhdr, *vh;
+
+		vh = skb_header_pointer(skb, mac_offset + vlan_depth,
+					sizeof(vhdr), &vhdr);
+		if (unlikely(!vh || !--parse_depth))
+			return (struct type_depth) { 0 };
+
+		type = vh->h_vlan_encapsulated_proto;
+		vlan_depth += VLAN_HLEN;
+	} while (eth_type_vlan(type));
+
+	return (struct type_depth) {
+		.type = type,
+		.depth = vlan_depth
+	};
+}
+EXPORT_SYMBOL(__vlan_get_protocol_offset);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 494d628d10a5105a6a32788b4673993f218ec881..a1005359085a8336edc3c95eceaf101025e75489 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -572,8 +572,9 @@ static __be16 vlan_get_protocol_dgram(const struct sk_buff *skb)
 	__be16 proto = skb->protocol;
 
 	if (unlikely(eth_type_vlan(proto)))
-		proto = __vlan_get_protocol_offset(skb, proto,
-						   skb_mac_offset(skb), NULL);
+		proto = vlan_get_protocol_offset_inline(skb, proto,
+							skb_mac_offset(skb),
+							NULL);
 
 	return proto;
 }
--
2.53.0.rc1.225.gd81095ad13-goog