Message-Id: <20250825135342.53110-2-kerneljasonxing@gmail.com>
Date: Mon, 25 Aug 2025 21:53:34 +0800
From: Jason Xing <kerneljasonxing@...il.com>
To: davem@...emloft.net,
edumazet@...gle.com,
kuba@...nel.org,
pabeni@...hat.com,
bjorn@...nel.org,
magnus.karlsson@...el.com,
maciej.fijalkowski@...el.com,
jonathan.lemon@...il.com,
sdf@...ichev.me,
ast@...nel.org,
daniel@...earbox.net,
hawk@...nel.org,
john.fastabend@...il.com,
horms@...nel.org,
andrew+netdev@...n.ch
Cc: bpf@...r.kernel.org,
netdev@...r.kernel.org,
Jason Xing <kernelxing@...cent.com>
Subject: [PATCH net-next v2 1/9] xsk: introduce XDP_GENERIC_XMIT_BATCH setsockopt
From: Jason Xing <kernelxing@...cent.com>
Add a new socket option that provides an alternative way to achieve
higher overall throughput once the rest of the series is applied.

Initialize skb_cache and desc_batch in setsockopt under xs->mutex
protection.

skb_cache will be used to store a batch of newly allocated skbs at once
in the xmit path. desc_batch will be used to temporarily store
descriptors taken from the pool.
Signed-off-by: Jason Xing <kernelxing@...cent.com>
---
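For reviewers, a minimal userspace sketch of how an application could
enable the new option (not part of this patch; xsk_fd and the value 64
are illustrative assumptions):

	#include <sys/socket.h>
	#include <linux/if_xdp.h>

	/* xsk_fd: an already created AF_XDP socket. Setting a batch of 0
	 * frees the caches and turns batched xmit off again.
	 */
	unsigned int batch = 64;	/* must not exceed the socket's max_tx_budget */

	if (setsockopt(xsk_fd, SOL_XDP, XDP_GENERIC_XMIT_BATCH,
		       &batch, sizeof(batch)))
		perror("setsockopt(XDP_GENERIC_XMIT_BATCH)");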
Documentation/networking/af_xdp.rst | 11 +++++++
include/net/xdp_sock.h | 3 ++
include/uapi/linux/if_xdp.h | 1 +
net/xdp/xsk.c | 47 +++++++++++++++++++++++++++++
tools/include/uapi/linux/if_xdp.h | 1 +
5 files changed, 63 insertions(+)
diff --git a/Documentation/networking/af_xdp.rst b/Documentation/networking/af_xdp.rst
index 50d92084a49c..decb4da80db4 100644
--- a/Documentation/networking/af_xdp.rst
+++ b/Documentation/networking/af_xdp.rst
@@ -447,6 +447,17 @@ mode to allow application to tune the per-socket maximum iteration for
better throughput and less frequency of send syscall.
Allowed range is [32, xs->tx->nentries].
+XDP_GENERIC_XMIT_BATCH
+----------------------
+
+This option allows the application to use batched xmit in copy mode. The
+batch process first allocates a number of skbs through the bulk allocation
+mechanism and then sends them out in one go, minimizing how often a few
+locks (such as the cache lock and queue lock) are grabbed and released.
+It normally improves overall throughput, as observed with the xdpsock
+benchmark, although it may increase per-packet latency.
+The value must not be larger than xs->max_tx_budget.
+
XDP_STATISTICS getsockopt
-------------------------
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index ce587a225661..c2b05268b8ad 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -70,6 +70,7 @@ struct xdp_sock {
* preventing other XSKs from being starved.
*/
u32 tx_budget_spent;
+ u32 generic_xmit_batch;
/* Statistics */
u64 rx_dropped;
@@ -89,6 +90,8 @@ struct xdp_sock {
struct mutex mutex;
struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
+ struct sk_buff **skb_cache;
+ struct xdp_desc *desc_batch;
};
/*
diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h
index 23a062781468..44cb72cd328e 100644
--- a/include/uapi/linux/if_xdp.h
+++ b/include/uapi/linux/if_xdp.h
@@ -80,6 +80,7 @@ struct xdp_mmap_offsets {
#define XDP_STATISTICS 7
#define XDP_OPTIONS 8
#define XDP_MAX_TX_SKB_BUDGET 9
+#define XDP_GENERIC_XMIT_BATCH 10
struct xdp_umem_reg {
__u64 addr; /* Start of packet data area */
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 9c3acecc14b1..e75a6e2bab83 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -1122,6 +1122,8 @@ static int xsk_release(struct socket *sock)
xskq_destroy(xs->tx);
xskq_destroy(xs->fq_tmp);
xskq_destroy(xs->cq_tmp);
+ kfree(xs->skb_cache);
+ kvfree(xs->desc_batch);
sock_orphan(sk);
sock->sk = NULL;
@@ -1456,6 +1458,51 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
WRITE_ONCE(xs->max_tx_budget, budget);
return 0;
}
+ case XDP_GENERIC_XMIT_BATCH:
+ {
+ struct xdp_desc *descs;
+ struct sk_buff **skbs;
+ unsigned int batch;
+ int ret = 0;
+
+ if (optlen != sizeof(batch))
+ return -EINVAL;
+ if (copy_from_sockptr(&batch, optval, sizeof(batch)))
+ return -EFAULT;
+ if (batch > xs->max_tx_budget)
+ return -EACCES;
+
+ mutex_lock(&xs->mutex);
+ if (!batch) {
+ kfree(xs->skb_cache);
+ kvfree(xs->desc_batch);
+ xs->generic_xmit_batch = 0;
+ goto out;
+ }
+
+ skbs = kmalloc(batch * sizeof(struct sk_buff *), GFP_KERNEL);
+ if (!skbs) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ descs = kvcalloc(batch, sizeof(*xs->desc_batch), GFP_KERNEL);
+ if (!descs) {
+ kfree(skbs);
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (xs->skb_cache)
+ kfree(xs->skb_cache);
+ if (xs->desc_batch)
+ kvfree(xs->desc_batch);
+
+ xs->skb_cache = skbs;
+ xs->desc_batch = descs;
+ xs->generic_xmit_batch = batch;
+out:
+ mutex_unlock(&xs->mutex);
+ return ret;
+ }
default:
break;
}
diff --git a/tools/include/uapi/linux/if_xdp.h b/tools/include/uapi/linux/if_xdp.h
index 23a062781468..44cb72cd328e 100644
--- a/tools/include/uapi/linux/if_xdp.h
+++ b/tools/include/uapi/linux/if_xdp.h
@@ -80,6 +80,7 @@ struct xdp_mmap_offsets {
#define XDP_STATISTICS 7
#define XDP_OPTIONS 8
#define XDP_MAX_TX_SKB_BUDGET 9
+#define XDP_GENERIC_XMIT_BATCH 10
struct xdp_umem_reg {
__u64 addr; /* Start of packet data area */
--
2.41.3