Message-ID: <20220728113844.GA53749@debian>
Date: Thu, 28 Jul 2022 13:39:00 +0200
From: Richard Gobert <richardbgobert@...il.com>
To: davem@...emloft.net, kuba@...nel.org, pabeni@...hat.com,
yoshfuji@...ux-ipv6.org, dsahern@...nel.org, xeb@...l.ru,
edumazet@...gle.com, iwienand@...hat.com, arnd@...db.de,
netdev@...r.kernel.org
Subject: [PATCH] net: gro: skb_gro_header_try_fast helper function
Introduce a simple helper function to replace a common pattern.
When accessing the GRO header, we first fetch a pointer into frag0 with
skb_gro_header_fast(), then use skb_gro_header_hard() to check whether
frag0 actually holds enough data, and fall back to skb_gro_header_slow()
when it does not.
This leads to the pattern
skb_gro_header_fast -> skb_gro_header_hard -> skb_gro_header_slow
recurring many times throughout the GRO code.
This patch replaces that open-coded sequence with a single inlined
helper, skb_gro_header_try_fast(), improving code readability.
Signed-off-by: Richard Gobert <richardbgobert@...il.com>
---
include/net/gro.h | 33 ++++++++++++++++++---------------
net/ethernet/eth.c | 9 +++------
net/ipv4/af_inet.c | 9 +++------
net/ipv4/gre_offload.c | 9 +++------
net/ipv4/tcp_offload.c | 9 +++------
5 files changed, 30 insertions(+), 39 deletions(-)
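
For reviewers, a minimal usage sketch (not part of this patch, assuming
<linux/skbuff.h> and <net/gro.h> are included): a hypothetical GRO
receive callback for a made-up protocol header, showing how the
fast/hard/slow sequence collapses into one skb_gro_header_try_fast()
call. "struct foohdr" and "foo_gro_receive" are invented names for
illustration only.

/* Hypothetical protocol header, for illustration only. */
struct foohdr {
	__be16 flags;
	__be16 len;
};

static struct sk_buff *foo_gro_receive(struct list_head *head,
				       struct sk_buff *skb)
{
	unsigned int off, hlen;
	struct foohdr *fh;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*fh);

	/* One call replaces the open-coded skb_gro_header_fast() +
	 * skb_gro_header_hard() + skb_gro_header_slow() sequence.
	 */
	fh = skb_gro_header_try_fast(skb, hlen, off);
	if (unlikely(!fh))
		return NULL;	/* header not available, nothing to merge */

	/* ... protocol-specific lookup and NAPI_GRO_CB() handling ... */
	return NULL;
}

The caller-side error handling is unchanged: a NULL return still means
the header could not be linearized, exactly as with the old slow-path
fallback.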
diff --git a/include/net/gro.h b/include/net/gro.h
index 867656b0739c..c37c5d6f8c02 100644
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -160,6 +160,17 @@ static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
return skb->data + offset;
}
+static inline void *skb_gro_header_try_fast(struct sk_buff *skb,
+ unsigned int hlen, unsigned int offset)
+{
+ void *ptr;
+
+ ptr = skb_gro_header_fast(skb, offset);
+ if (skb_gro_header_hard(skb, hlen))
+ ptr = skb_gro_header_slow(skb, hlen, offset);
+ return ptr;
+}
+
static inline void *skb_gro_network_header(struct sk_buff *skb)
{
return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
@@ -301,12 +312,9 @@ static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
return ptr;
}
- ptr = skb_gro_header_fast(skb, off);
- if (skb_gro_header_hard(skb, off + plen)) {
- ptr = skb_gro_header_slow(skb, off + plen, off);
- if (!ptr)
- return NULL;
- }
+ ptr = skb_gro_header_try_fast(skb, off + plen, off);
+ if (!ptr)
+ return NULL;
delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
start, offset);
@@ -329,12 +337,9 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
if (!grc->delta)
return;
- ptr = skb_gro_header_fast(skb, grc->offset);
- if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
- ptr = skb_gro_header_slow(skb, plen, grc->offset);
- if (!ptr)
- return;
- }
+ ptr = skb_gro_header_try_fast(skb, plen, grc->offset);
+ if (!ptr)
+ return;
remcsum_unadjust((__sum16 *)ptr, grc->delta);
}
@@ -405,9 +410,7 @@ static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
off = skb_gro_offset(skb);
hlen = off + sizeof(*uh);
- uh = skb_gro_header_fast(skb, off);
- if (skb_gro_header_hard(skb, hlen))
- uh = skb_gro_header_slow(skb, hlen, off);
+ uh = skb_gro_header_try_fast(skb, hlen, off);
return uh;
}
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 62b89d6f54fd..3e74a3ce4984 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -414,12 +414,9 @@ struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb)
off_eth = skb_gro_offset(skb);
hlen = off_eth + sizeof(*eh);
- eh = skb_gro_header_fast(skb, off_eth);
- if (skb_gro_header_hard(skb, hlen)) {
- eh = skb_gro_header_slow(skb, hlen, off_eth);
- if (unlikely(!eh))
- goto out;
- }
+ eh = skb_gro_header_try_fast(skb, hlen, off_eth);
+ if (unlikely(!eh))
+ goto out;
flush = 0;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 93da9f783bec..4591c1e1a8e1 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1447,12 +1447,9 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
off = skb_gro_offset(skb);
hlen = off + sizeof(*iph);
- iph = skb_gro_header_fast(skb, off);
- if (skb_gro_header_hard(skb, hlen)) {
- iph = skb_gro_header_slow(skb, hlen, off);
- if (unlikely(!iph))
- goto out;
- }
+ iph = skb_gro_header_try_fast(skb, hlen, off);
+ if (unlikely(!iph))
+ goto out;
proto = iph->protocol;
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 07073fa35205..a97ed708b47a 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -137,12 +137,9 @@ static struct sk_buff *gre_gro_receive(struct list_head *head,
off = skb_gro_offset(skb);
hlen = off + sizeof(*greh);
- greh = skb_gro_header_fast(skb, off);
- if (skb_gro_header_hard(skb, hlen)) {
- greh = skb_gro_header_slow(skb, hlen, off);
- if (unlikely(!greh))
- goto out;
- }
+ greh = skb_gro_header_try_fast(skb, hlen, off);
+ if (unlikely(!greh))
+ goto out;
/* Only support version 0 and K (key), C (csum) flags. Note that
* although the support for the S (seq#) flag can be added easily
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 30abde86db45..a1229253d558 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -195,12 +195,9 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
off = skb_gro_offset(skb);
hlen = off + sizeof(*th);
- th = skb_gro_header_fast(skb, off);
- if (skb_gro_header_hard(skb, hlen)) {
- th = skb_gro_header_slow(skb, hlen, off);
- if (unlikely(!th))
- goto out;
- }
+ th = skb_gro_header_try_fast(skb, hlen, off);
+ if (unlikely(!th))
+ goto out;
thlen = th->doff * 4;
if (thlen < sizeof(*th))
--
2.36.1