Message-Id: <20250825135342.53110-6-kerneljasonxing@gmail.com>
Date: Mon, 25 Aug 2025 21:53:38 +0800
From: Jason Xing <kerneljasonxing@...il.com>
To: davem@...emloft.net,
edumazet@...gle.com,
kuba@...nel.org,
pabeni@...hat.com,
bjorn@...nel.org,
magnus.karlsson@...el.com,
maciej.fijalkowski@...el.com,
jonathan.lemon@...il.com,
sdf@...ichev.me,
ast@...nel.org,
daniel@...earbox.net,
hawk@...nel.org,
john.fastabend@...il.com,
horms@...nel.org,
andrew+netdev@...n.ch
Cc: bpf@...r.kernel.org,
netdev@...r.kernel.org,
Jason Xing <kernelxing@...cent.com>
Subject: [PATCH net-next v2 5/9] xsk: add xsk_alloc_batch_skb() to build skbs in batch
From: Jason Xing <kernelxing@...cent.com>
Support allocating and building skbs in batch.

This patch uses kmem_cache_alloc_bulk() to perform the batch allocation,
which relies on the global common cache 'net_hotdata.skbuff_cache'. Use a
standalone xsk skb cache (namely, xs->skb_cache) to store the skbs instead
of resorting to napi_alloc_cache, which was designed for softirq context.
When a memory shortage occurs, consuming the allocated skbs from the cache
in reverse order (e.g. 10, 9, ..., 2, 1, 0) avoids repeatedly allocating
skbs and then freeing part of them.
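
The core of the batching logic, roughly sketched (simplified from the
function added below; error handling omitted):

  u32 i;

  /* Top up the per-socket cache so it holds nb_pkts skb shells. */
  if (xs->skb_count < nb_pkts)
          xs->skb_count += kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
                                                 gfp_mask,
                                                 nb_pkts - xs->skb_count,
                                                 (void **)&xs->skb_cache[xs->skb_count]);

  /* Consume from the tail (highest index first) so that skbs left over
   * after a partial send stay in the cache for the next attempt instead
   * of being freed and reallocated.
   */
  for (i = 0; i < nb_pkts; i++) {
          struct sk_buff *skb = xs->skb_cache[xs->skb_count - 1 - i];

          /* ... initialize it and hand it to xsk_build_skb() ... */
  }
  xs->skb_count -= i;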

After allocating memory for each skb in a 'for' loop, the patch borrows
part of __alloc_skb() to initialize the skb and then calls xsk_build_skb()
to complete the rest of the process, such as copying the data, as condensed
below.
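
Condensed, the per-skb initialization boils down to (the full version is in
the net/core/skbuff.c hunk below):

  data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
  memset(skb, 0, offsetof(struct sk_buff, tail));
  __build_skb_around(skb, data, size);
  skb->pfmemalloc = pfmemalloc;
  skb_set_owner_w(skb, &xs->sk);

  skb = xsk_build_skb(xs, skb, &descs[j]);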

Since no fclone flag is passed during allocation, on the freeing side
napi_consume_skb() in the tx completion path puts the skb into the global
cache 'net_hotdata.skbuff_cache' (as opposed to xs->skb_cache), which
implements deferred skb freeing so that skbs are not freed one by one.
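
On the completion side, the existing napi_consume_skb() call in the
driver's tx completion is enough (sketch; that path is not modified by
this patch):

  /* The skb was bulk-allocated without fclone, so napi_consume_skb()
   * recycles it via the deferred/bulk freeing path instead of freeing
   * each skb individually.
   */
  napi_consume_skb(skb, budget);
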
Signed-off-by: Jason Xing <kernelxing@...cent.com>
---
 include/net/xdp_sock.h |   3 ++
 net/core/skbuff.c      | 103 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 106 insertions(+)

diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index cbba880c27c3..b533317409df 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -92,6 +92,7 @@ struct xdp_sock {
struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
struct sk_buff **skb_cache;
struct xdp_desc *desc_batch;
+ unsigned int skb_count;
};
/*
@@ -127,6 +128,8 @@ struct xsk_tx_metadata_ops {
struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
struct sk_buff *allocated_skb,
struct xdp_desc *desc);
+int xsk_alloc_batch_skb(struct xdp_sock *xs, u32 nb_pkts, u32 nb_descs,
+ int *consumed, int *start, int *end);
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ee0274417948..c9071e56d133 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -80,6 +80,8 @@
#include <net/mctp.h>
#include <net/page_pool/helpers.h>
#include <net/dropreason.h>
+#include <net/xdp_sock.h>
+#include <net/xsk_buff_pool.h>
#include <linux/uaccess.h>
#include <trace/events/skb.h>
@@ -614,6 +616,107 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
return obj;
}
+int xsk_alloc_batch_skb(struct xdp_sock *xs, u32 nb_pkts, u32 nb_descs,
+ int *consumed, int *start, int *end)
+{
+ struct xdp_desc *descs = xs->desc_batch;
+ struct sk_buff **skbs = xs->skb_cache;
+ gfp_t gfp_mask = xs->sk.sk_allocation;
+ struct net_device *dev = xs->dev;
+ int node = NUMA_NO_NODE;
+ struct sk_buff *skb;
+ u32 i = 0, j = 0;
+ bool pfmemalloc;
+ u32 base_len;
+ int err = 0;
+ u8 *data;
+
+ base_len = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
+ if (!(dev->priv_flags & IFF_TX_SKB_NO_LINEAR))
+ base_len += dev->needed_tailroom;
+
+ if (xs->skb_count >= nb_pkts)
+ goto build;
+
+ if (xs->skb) {
+ i = 1;
+ xs->skb_count++;
+ }
+
+ xs->skb_count += kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
+ gfp_mask, nb_pkts - xs->skb_count,
+ (void **)&skbs[xs->skb_count]);
+ if (xs->skb_count < nb_pkts)
+ nb_pkts = xs->skb_count;
+
+build:
+ for (i = 0, j = 0; j < nb_descs; j++) {
+ if (!xs->skb) {
+ u32 size = base_len + descs[j].len;
+
+ /* In case we don't have enough allocated skbs */
+ if (i >= nb_pkts) {
+ err = -EAGAIN;
+ break;
+ }
+
+ if (sk_wmem_alloc_get(&xs->sk) > READ_ONCE(xs->sk.sk_sndbuf)) {
+ err = -EAGAIN;
+ break;
+ }
+
+ skb = skbs[xs->skb_count - 1 - i];
+
+ prefetchw(skb);
+ /* We do our best to align skb_shared_info on a separate cache
+ * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
+ * aligned memory blocks, unless SLUB/SLAB debug is enabled.
+ * Both skb->head and skb_shared_info are cache line aligned.
+ */
+ data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
+ if (unlikely(!data)) {
+ err = -ENOBUFS;
+ break;
+ }
+ /* kmalloc_size_roundup() might give us more room than requested.
+ * Put skb_shared_info exactly at the end of allocated zone,
+ * to allow max possible filling before reallocation.
+ */
+ prefetchw(data + SKB_WITH_OVERHEAD(size));
+
+ memset(skb, 0, offsetof(struct sk_buff, tail));
+ __build_skb_around(skb, data, size);
+ skb->pfmemalloc = pfmemalloc;
+ skb_set_owner_w(skb, &xs->sk);
+ } else if (unlikely(i == 0)) {
+ /* We have a skb in cache that is left last time */
+ kmem_cache_free(net_hotdata.skbuff_cache, skbs[xs->skb_count - 1]);
+ skbs[xs->skb_count - 1] = xs->skb;
+ }
+
+ skb = xsk_build_skb(xs, skb, &descs[j]);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ break;
+ }
+
+ if (xp_mb_desc(&descs[j])) {
+ xs->skb = skb;
+ continue;
+ }
+
+ xs->skb = NULL;
+ i++;
+ }
+
+ *consumed = j;
+ *start = xs->skb_count - 1;
+ *end = xs->skb_count - i;
+ xs->skb_count -= i;
+
+ return err;
+}
+
/* Allocate a new skbuff. We do this ourselves so we can fill in a few
* 'private' fields and also do memory statistics to find all the
* [BEEP] leaks.
--
2.41.3