Message-ID: <27e2c981fb7ac08d9e9dd000a0e284a073571bde.1764943231.git.pabeni@redhat.com>
Date: Fri, 5 Dec 2025 15:03:31 +0100
From: Paolo Abeni <pabeni@...hat.com>
To: netdev@...r.kernel.org
Cc: "David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jakub Kicinski <kuba@...nel.org>,
Simon Horman <horms@...nel.org>,
Neal Cardwell <ncardwell@...gle.com>,
Kuniyuki Iwashima <kuniyu@...gle.com>,
David Ahern <dsahern@...nel.org>
Subject: [RFC PATCH 2/2] net: gro: set the transport header later

After the previous patch, the GRO engine receive callbacks no longer
rely on the skb transport header being set.

Move setting the transport header to GRO complete time, with one
notable exception: SKB_GSO_FRAGLIST offload needs the headers to be
set on each skb in the list prior to segmentation.

This prevents the NAPI gro_cell instance on top of a geneve tunnel
with GRO hints enabled from corrupting the GRO-hint-aggregated packet
by setting the (innermost) transport header to the middle one just
before stopping the GRO process due to the encap mark.

Signed-off-by: Paolo Abeni <pabeni@...hat.com>
---
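Not part of the patch, just a rough sketch for reviewers of the ordering
this change ends up with on the IPv4 path. The function below is a
simplified, hypothetical rendition of inet_gro_complete(); the name
inet_gro_complete_sketch, the error value, and the elision of the
length/checksum fixup and of the indirect-call macros are all mine,
the real code lives in net/ipv4/af_inet.c:

/* schematic only; would sit in net/ipv4/af_inet.c with its usual
 * includes (<linux/ip.h>, <net/protocol.h>, ...)
 */
static int inet_gro_complete_sketch(struct sk_buff *skb, int nhoff)
{
        struct iphdr *iph = (struct iphdr *)(skb->data + nhoff);
        const struct net_offload *ops;

        ops = rcu_dereference(inet_offloads[iph->protocol]);
        if (!ops || !ops->callbacks.gro_complete)
                return -ENOSYS;

        /* the transport header is now set once here, right before the
         * per-protocol complete callback, instead of in the gro_receive
         * path; tcp4_gro_complete()/udp4_gro_complete() keep using
         * tcp_hdr()/udp_hdr() as before
         */
        skb_set_transport_header(skb, nhoff + sizeof(*iph));

        return ops->callbacks.gro_complete(skb, nhoff + sizeof(*iph));
}

The SKB_GSO_FRAGLIST case is the exception described above: the fraglist
segmentation path needs the headers set on each skb in the list, so
udp_gro_receive_segment() still sets the network and transport headers
at receive time.
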
net/ipv4/af_inet.c | 2 +-
net/ipv4/tcp_offload.c | 1 +
net/ipv4/udp_offload.c | 4 ++++
net/ipv6/ip6_offload.c | 3 +--
4 files changed, 7 insertions(+), 3 deletions(-)
 
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 08d811f11896..f954ab78481a 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1527,7 +1527,6 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
* as we already checked checksum over ipv4 header was 0
*/
skb_gro_pull(skb, sizeof(*iph));
- skb_set_transport_header(skb, skb_gro_offset(skb));
 
pp = indirect_call_gro_receive(tcp4_gro_receive, udp4_gro_receive,
ops->callbacks.gro_receive, head, skb);
@@ -1611,6 +1610,7 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff)
if (WARN_ON(!ops || !ops->callbacks.gro_complete))
goto out;
 
+ skb_set_transport_header(skb, nhoff + sizeof(*iph));
/* Only need to add sizeof(*iph) to get to the next hdr below
* because any hdr with option will have been flushed in
* inet_gro_receive().
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index fa36686df6d7..a78d9b15de06 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -334,6 +334,7 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
flush |= skb->csum_level != p->csum_level;
flush |= NAPI_GRO_CB(p)->count >= 64;
skb_set_network_header(skb, skb_gro_receive_network_offset(skb));
+ skb_set_transport_header(skb, (unsigned char *)th - skb->data);
 
if (flush || skb_gro_receive_list(p, skb))
mss = 1;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 7048cb2a28a2..73edbc154cfa 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -751,6 +751,8 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
pp = p;
} else {
if (NAPI_GRO_CB(skb)->is_flist) {
+ int offset;
+
if (!pskb_may_pull(skb, skb_gro_offset(skb))) {
NAPI_GRO_CB(skb)->flush = 1;
return NULL;
@@ -761,6 +763,8 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
return NULL;
}
skb_set_network_header(skb, skb_gro_receive_network_offset(skb));
+ offset = (unsigned char *)uh - skb->data;
+ skb_set_transport_header(skb, offset);
ret = skb_gro_receive_list(p, skb);
} else {
skb_gro_postpull_rcsum(skb, uh,
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index fce91183797a..ed71cbd45690 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -256,8 +256,6 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
skb_gro_pull(skb, sizeof(*iph));
}
 
- skb_set_transport_header(skb, skb_gro_offset(skb));
-
NAPI_GRO_CB(skb)->proto = proto;
flush--;
@@ -382,6 +380,7 @@ INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
if (WARN_ON(!ops || !ops->callbacks.gro_complete))
goto out;
 
+ skb_set_transport_header(skb, nhoff);
err = INDIRECT_CALL_L4(ops->callbacks.gro_complete, tcp6_gro_complete,
udp6_gro_complete, skb, nhoff);
 
--
2.52.0