Message-ID: <20250511004110.145171-3-ebiggers@kernel.org>
Date: Sat, 10 May 2025 17:41:02 -0700
From: Eric Biggers <ebiggers@...nel.org>
To: netdev@...r.kernel.org
Cc: linux-nvme@...ts.infradead.org,
linux-sctp@...r.kernel.org,
linux-rdma@...r.kernel.org,
linux-kernel@...r.kernel.org,
Daniel Borkmann <daniel@...earbox.net>,
Marcelo Ricardo Leitner <marcelo.leitner@...il.com>,
Sagi Grimberg <sagi@...mberg.me>,
Ard Biesheuvel <ardb@...nel.org>
Subject: [PATCH net-next 02/10] net: add skb_crc32c()
From: Eric Biggers <ebiggers@...gle.com>

Add skb_crc32c(), which calculates the CRC32C of a sk_buff. It will
replace __skb_checksum(), which unnecessarily supports arbitrary
checksums. Compared to __skb_checksum(), skb_crc32c():

- Uses the correct type for CRC32C values (u32, not __wsum).

- Does not require the caller to provide a skb_checksum_ops struct.

- Is faster because it does not use indirect calls and does not use
  the very slow crc32c_combine().
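
As a rough before/after sketch of a caller (illustrative only, not
copied from any in-tree user; the "old" form approximates what callers
of crc32c_csum_stub do today):

	/* Old: an indirect call through a skb_checksum_ops struct, plus
	 * __wsum/u32 force-casts even though the value is really a CRC.
	 */
	crc = (__force u32)__skb_checksum(skb, offset, len,
					  (__force __wsum)crc,
					  crc32c_csum_stub);

	/* New: a direct call with native u32 CRC values. */
	crc = skb_crc32c(skb, offset, len, crc);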

According to commit 2817a336d4d5 ("net: skb_checksum: allow custom
update/combine for walking skb") which added __skb_checksum(), the
original motivation for the abstraction layer was to avoid code
duplication for CRC32C and other checksums in the future. However:

- No additional checksums showed up after CRC32C. __skb_checksum()
  is only used with the "regular" net checksum and CRC32C.

- Indirect calls are expensive. Commit 2544af0344ba ("net: avoid
  indirect calls in L4 checksum calculation") worked around this
  using the INDIRECT_CALL_1 macro. But that only avoided the indirect
  call for the net checksum, and at the cost of an extra branch.

- The checksums use different types (__wsum and u32), causing casts
  to be needed.

- It made the checksums of fragments be combined (rather than
  chained) for both checksums, despite this being highly
  counterproductive for CRC32C due to how slow crc32c_combine() is.
  This can clearly be seen in commit 4c2f24549644 ("sctp: linearize
  early if it's not GSO") which tried to work around this performance
  bug. With a dedicated function for each checksum, we can instead
  just use the proper strategy for each checksum.
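
To make the chained-vs-combined distinction concrete, a simplified
sketch (illustrative only, not the actual skb-walking code), where a
and b are two consecutive fragments:

	/* Chaining, as skb_crc32c() does: just continue the running CRC
	 * into the next fragment.
	 */
	crc = crc32c(crc, a, alen);
	crc = crc32c(crc, b, blen);

	/* Combining, as the __skb_checksum() framework does: CRC the
	 * second fragment from a zero seed, then merge the two values.
	 * crc32c_combine() has to "shift" the first CRC forward by blen
	 * bytes, which is far more expensive than simply chaining.
	 */
	crc = crc32c_combine(crc32c(crc, a, alen), crc32c(0, b, blen),
			     blen);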

Signed-off-by: Eric Biggers <ebiggers@...gle.com>
---
 include/linux/skbuff.h |  1 +
 net/core/skbuff.c      | 73 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 74 insertions(+)

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f3e72be6f634..33b33bb18aa6 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -4192,10 +4192,11 @@ extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly;
 
 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
 		      __wsum csum, const struct skb_checksum_ops *ops);
 __wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
 		    __wsum csum);
+u32 skb_crc32c(const struct sk_buff *skb, int offset, int len, u32 crc);
 
 static inline void * __must_check
 __skb_header_pointer(const struct sk_buff *skb, int offset, int len,
 		     const void *data, int hlen, void *buffer)
 {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d73ad79fe739..b9900cc16a24 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -62,10 +62,11 @@
 #include <linux/bitfield.h>
 #include <linux/if_vlan.h>
 #include <linux/mpls.h>
 #include <linux/kcov.h>
 #include <linux/iov_iter.h>
+#include <linux/crc32.h>
 
 #include <net/protocol.h>
 #include <net/dst.h>
 #include <net/sock.h>
 #include <net/checksum.h>
@@ -3626,10 +3627,82 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 	BUG_ON(len);
 	return csum;
 }
 EXPORT_SYMBOL(skb_copy_and_csum_bits);
 
+#ifdef CONFIG_NET_CRC32C
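+/*
+ * skb_crc32c - compute the CRC32C of an skb's data
+ * @skb: buffer to checksum
+ * @offset: offset in @skb at which to start checksumming
+ * @len: number of bytes to checksum
+ * @crc: initial CRC32C value
+ *
+ * Walks the skb's linear data, its page frags, and its frag_list,
+ * chaining @crc through each piece with crc32c().
+ */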
+u32 skb_crc32c(const struct sk_buff *skb, int offset, int len, u32 crc)
+{
+	int start = skb_headlen(skb);
+	int i, copy = start - offset;
+	struct sk_buff *frag_iter;
+
+	if (copy > 0) {
+		copy = min(copy, len);
+		crc = crc32c(crc, skb->data + offset, copy);
+		len -= copy;
+		if (len == 0)
+			return crc;
+		offset += copy;
+	}
+
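+	/* Frags that aren't readable (e.g. backed by device memory) can't
+	 * be mapped, so they can't be checksummed here.
+	 */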
+	if (WARN_ON_ONCE(!skb_frags_readable(skb)))
+		return 0;
+
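+	/* Then the page fragments, mapping each page in turn. */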
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		int end;
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		WARN_ON(start > offset + len);
+
+		end = start + skb_frag_size(frag);
+		copy = end - offset;
+		if (copy > 0) {
+			u32 p_off, p_len, copied;
+			struct page *p;
+			u8 *vaddr;
+
+			copy = min(copy, len);
+			skb_frag_foreach_page(frag,
+					      skb_frag_off(frag) + offset - start,
+					      copy, p, p_off, p_len, copied) {
+				vaddr = kmap_atomic(p);
+				crc = crc32c(crc, vaddr + p_off, p_len);
+				kunmap_atomic(vaddr);
+			}
+			len -= copy;
+			if (len == 0)
+				return crc;
+			offset += copy;
+		}
+		start = end;
+	}
+
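+	/* Finally, recurse into any skbs chained on the frag_list. */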
+	skb_walk_frags(skb, frag_iter) {
+		int end;
+
+		WARN_ON(start > offset + len);
+
+		end = start + frag_iter->len;
+		copy = end - offset;
+		if (copy > 0) {
+			copy = min(copy, len);
+			crc = skb_crc32c(frag_iter, offset - start, copy, crc);
+			len -= copy;
+			if (len == 0)
+				return crc;
+			offset += copy;
+		}
+		start = end;
+	}
+	BUG_ON(len);
+
+	return crc;
+}
+EXPORT_SYMBOL(skb_crc32c);
+#endif /* CONFIG_NET_CRC32C */
+
 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
 {
 	__sum16 sum;
 
 	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
--
2.49.0