Message-Id: <20251021131209.41491-3-kerneljasonxing@gmail.com>
Date: Tue, 21 Oct 2025 21:12:02 +0800
From: Jason Xing <kerneljasonxing@...il.com>
To: davem@...emloft.net,
edumazet@...gle.com,
kuba@...nel.org,
pabeni@...hat.com,
bjorn@...nel.org,
magnus.karlsson@...el.com,
maciej.fijalkowski@...el.com,
jonathan.lemon@...il.com,
sdf@...ichev.me,
ast@...nel.org,
daniel@...earbox.net,
hawk@...nel.org,
john.fastabend@...il.com,
joe@...a.to,
willemdebruijn.kernel@...il.com
Cc: bpf@...r.kernel.org,
netdev@...r.kernel.org,
Jason Xing <kernelxing@...cent.com>
Subject: [PATCH net-next v3 2/9] xsk: extend xsk_build_skb() to support passing an already allocated skb
From: Jason Xing <kernelxing@...cent.com>
To avoid reinventing the wheel, this patch lets the upcoming batch
feature reuse xsk_build_skb() for the remainder of the skb
initialization once the skb has been allocated.

The original xsk_build_skb() allocates a new skb by calling
sock_alloc_send_skb() in both copy mode and zerocopy mode. Add a new
parameter, allocated_skb, so that other callers can pass an already
allocated skb in support of the later xmit batch feature, which
replaces the per-skb allocation with a bulk one.
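A minimal sketch of how a later batch caller might use the new
parameter. The helper name xsk_xmit_batch() and the up-front
allocation shown here are illustrative assumptions only; the actual
batch patches may allocate the skbs differently (e.g. via a bulk
allocator):

	/* Hypothetical batch xmit path (illustration, not part of this
	 * patch): skbs[] was filled in beforehand, so xsk_build_skb()
	 * skips sock_alloc_send_skb() and reuses each entry.
	 */
	static int xsk_xmit_batch(struct xdp_sock *xs, struct xdp_desc *descs,
				  struct sk_buff **skbs, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			struct sk_buff *skb;

			skb = xsk_build_skb(xs, skbs[i], &descs[i]);
			if (IS_ERR(skb))
				return PTR_ERR(skb);

			/* hand the fully built skb to the driver, e.g. via
			 * __dev_direct_xmit() as the existing path does
			 */
		}
		return 0;
	}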
Signed-off-by: Jason Xing <kernelxing@...cent.com>
---
include/net/xdp_sock.h | 3 +++
net/xdp/xsk.c | 23 ++++++++++++++++-------
2 files changed, 19 insertions(+), 7 deletions(-)
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index f33f1e7dcea2..8944f4782eb6 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -127,6 +127,9 @@ struct xsk_tx_metadata_ops {
void (*tmo_request_launch_time)(u64 launch_time, void *priv);
};
+struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
+ struct sk_buff *allocated_skb,
+ struct xdp_desc *desc);
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index ace91800c447..f9458347ff7b 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -697,6 +697,7 @@ static int xsk_skb_metadata(struct sk_buff *skb, void *buffer,
}
static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
+ struct sk_buff *allocated_skb,
struct xdp_desc *desc)
{
struct xsk_buff_pool *pool = xs->pool;
@@ -714,7 +715,10 @@ static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
if (!skb) {
hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
- skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
+ if (!allocated_skb)
+ skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
+ else
+ skb = allocated_skb;
if (unlikely(!skb))
return ERR_PTR(err);
@@ -769,15 +773,16 @@ static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
return skb;
}
-static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
- struct xdp_desc *desc)
+struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
+ struct sk_buff *allocated_skb,
+ struct xdp_desc *desc)
{
struct net_device *dev = xs->dev;
struct sk_buff *skb = xs->skb;
int err;
if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
- skb = xsk_build_skb_zerocopy(xs, desc);
+ skb = xsk_build_skb_zerocopy(xs, allocated_skb, desc);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
skb = NULL;
@@ -792,8 +797,12 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
if (!skb) {
hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
- tr = dev->needed_tailroom;
- skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
+ if (!allocated_skb) {
+ tr = dev->needed_tailroom;
+ skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
+ } else {
+ skb = allocated_skb;
+ }
if (unlikely(!skb))
goto free_err;
@@ -906,7 +915,7 @@ static int __xsk_generic_xmit(struct sock *sk)
goto out;
}
- skb = xsk_build_skb(xs, &desc);
+ skb = xsk_build_skb(xs, NULL, &desc);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
if (err != -EOVERFLOW)
--
2.41.3