Message-ID: <20240424180458.56211-5-nbd@nbd.name>
Date: Wed, 24 Apr 2024 20:04:56 +0200
From: Felix Fietkau <nbd@....name>
To: netdev@...r.kernel.org,
Eric Dumazet <edumazet@...gle.com>,
"David S. Miller" <davem@...emloft.net>,
David Ahern <dsahern@...nel.org>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>
Cc: willemdebruijn.kernel@...il.com,
linux-kernel@...r.kernel.org
Subject: [PATCH net-next 4/4] net: add heuristic for enabling TCP fraglist GRO

When forwarding TCP after GRO, software segmentation is very expensive,
especially when the checksum needs to be recalculated.

One case where that's currently unavoidable is when routing packets over
PPPoE. Performance improves significantly when using fraglist GRO
implemented in the same way as for UDP.

When NETIF_F_GRO_FRAGLIST is enabled, perform a lookup for an established
socket in the same netns as the receiving device. If a matching socket is
found, the packets are destined for the local machine, so regular GRO is
used; if not, the packets are presumably being forwarded and fraglist GRO
is enabled. While this may not cover all relevant use cases in multi-netns
configurations, it should be good enough for most configurations that need
this.
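
For reference, the feature can be toggled per device at runtime via
ethtool; a minimal example, assuming an ingress device named eth0 (the
device name is illustrative):

  # enable fraglist GRO on the receiving device
  ethtool -K eth0 rx-gro-list on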

Here's a measurement of running 2 TCP streams through a MediaTek MT7622
device (2-core Cortex-A53), which runs NAT with flow offload enabled from
one ethernet port to PPPoE on another ethernet port + cake qdisc set to
1Gbps:

rx-gro-list off: 630 Mbit/s, CPU 35% idle
rx-gro-list on:  770 Mbit/s, CPU 40% idle
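
That is roughly a 22% throughput improvement (770 / 630 ≈ 1.22), while
also leaving more CPU idle (40% vs. 35%).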

Signed-off-by: Felix Fietkau <nbd@....name>
---
 net/ipv4/tcp_offload.c   | 45 ++++++++++++++++++++++++++++++++++++++-
 net/ipv6/tcpv6_offload.c | 46 +++++++++++++++++++++++++++++++++++++++-
 2 files changed, 89 insertions(+), 2 deletions(-)

diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 6294e7a5c099..f987e2d8423a 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -404,6 +404,49 @@ void tcp_gro_complete(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(tcp_gro_complete);
 
+static bool tcp4_check_fraglist_gro(struct sk_buff *skb)
+{
+	const struct iphdr *iph = skb_gro_network_header(skb);
+	struct net *net = dev_net(skb->dev);
+	unsigned int off, hlen, thlen;
+	struct tcphdr *th;
+	struct sock *sk;
+	int iif, sdif;
+
+	if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST))
+		return false;
+
+	inet_get_iif_sdif(skb, &iif, &sdif);
+
+	off = skb_gro_offset(skb);
+	hlen = off + sizeof(*th);
+	th = skb_gro_header(skb, hlen, off);
+	if (unlikely(!th))
+		return false;
+
+	thlen = th->doff * 4;
+	if (thlen < sizeof(*th))
+		return false;
+
+	hlen = off + thlen;
+	if (!skb_gro_may_pull(skb, hlen)) {
+		th = skb_gro_header_slow(skb, hlen, off);
+		if (unlikely(!th))
+			return false;
+	}
+
+	sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
+				       iph->saddr, th->source,
+				       iph->daddr, ntohs(th->dest),
+				       iif, sdif);
+	if (!sk)
+		return true;
+
+	sock_put(sk);
+
+	return false;
+}
+
 INDIRECT_CALLABLE_SCOPE
 struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
 {
@@ -415,7 +458,7 @@ struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
 		return NULL;
 	}
 
-	return tcp_gro_receive(head, skb, false);
+	return tcp_gro_receive(head, skb, tcp4_check_fraglist_gro(skb));
 }
 
 INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index 239588557dc4..c214f5cfe595 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -7,12 +7,56 @@
  */
 #include <linux/indirect_call_wrapper.h>
 #include <linux/skbuff.h>
+#include <net/inet6_hashtables.h>
 #include <net/gro.h>
 #include <net/protocol.h>
 #include <net/tcp.h>
 #include <net/ip6_checksum.h>
 #include "ip6_offload.h"
 
+static bool tcp6_check_fraglist_gro(struct sk_buff *skb)
+{
+	const struct ipv6hdr *hdr = skb_gro_network_header(skb);
+	struct net *net = dev_net(skb->dev);
+	unsigned int off, hlen, thlen;
+	struct tcphdr *th;
+	struct sock *sk;
+	int iif, sdif;
+
+	if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST))
+		return false;
+
+	inet6_get_iif_sdif(skb, &iif, &sdif);
+
+	off = skb_gro_offset(skb);
+	hlen = off + sizeof(*th);
+	th = skb_gro_header(skb, hlen, off);
+	if (unlikely(!th))
+		return false;
+
+	thlen = th->doff * 4;
+	if (thlen < sizeof(*th))
+		return false;
+
+	hlen = off + thlen;
+	if (!skb_gro_may_pull(skb, hlen)) {
+		th = skb_gro_header_slow(skb, hlen, off);
+		if (unlikely(!th))
+			return false;
+	}
+
+	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
+					&hdr->saddr, th->source,
+					&hdr->daddr, ntohs(th->dest),
+					iif, sdif);
+	if (!sk)
+		return true;
+
+	sock_put(sk);
+
+	return false;
+}
+
 INDIRECT_CALLABLE_SCOPE
 struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
 {
@@ -24,7 +68,7 @@ struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
 		return NULL;
 	}
 
-	return tcp_gro_receive(head, skb, false);
+	return tcp_gro_receive(head, skb, tcp6_check_fraglist_gro(skb));
 }
 
 INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff)
--
2.44.0