Message-Id: <20251124080858.89593-4-kerneljasonxing@gmail.com>
Date: Mon, 24 Nov 2025 16:08:58 +0800
From: Jason Xing <kerneljasonxing@...il.com>
To: davem@...emloft.net,
edumazet@...gle.com,
kuba@...nel.org,
pabeni@...hat.com,
bjorn@...nel.org,
magnus.karlsson@...el.com,
maciej.fijalkowski@...el.com,
jonathan.lemon@...il.com,
sdf@...ichev.me,
ast@...nel.org,
daniel@...earbox.net,
hawk@...nel.org,
john.fastabend@...il.com
Cc: bpf@...r.kernel.org,
netdev@...r.kernel.org,
Jason Xing <kernelxing@...cent.com>
Subject: [PATCH net-next 3/3] xsk: convert cq from spin lock protection into atomic operations
From: Jason Xing <kernelxing@...cent.com>

Now it's time to convert the cq (completion queue) in the generic
path to atomic operations to achieve higher performance. I measured
an improvement of around 5% across different platforms.

Signed-off-by: Jason Xing <kernelxing@...cent.com>
---
 include/net/xsk_buff_pool.h |  5 -----
 net/xdp/xsk.c               | 12 ++----------
 net/xdp/xsk_buff_pool.c     |  1 -
 3 files changed, 2 insertions(+), 16 deletions(-)

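To illustrate the direction of the conversion, here is a minimal
userspace-style sketch, assuming the new bool argument to
xskq_prod_reserve() selects an atomic update of the queue's cached
producer index. The names below (fake_prod_ring, fake_prod_reserve)
are hypothetical stand-ins, not the real xsk_queue internals:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative ring with a producer position shared by sockets. */
struct fake_prod_ring {
	uint32_t size;			/* number of entries */
	_Atomic uint32_t cached_prod;	/* shared producer position */
	uint32_t cached_cons;		/* snapshot of consumer position */
};

/* Reserve one entry without taking a lock. Concurrent producers race
 * on cached_prod; the compare-and-swap loop resolves the race that the
 * old code serialized with cq_cached_prod_lock.
 */
static bool fake_prod_reserve(struct fake_prod_ring *q)
{
	uint32_t old = atomic_load_explicit(&q->cached_prod,
					    memory_order_relaxed);

	do {
		/* Full once the producer is a whole ring ahead of the
		 * consumer snapshot; a real implementation would refresh
		 * the snapshot before giving up.
		 */
		if (old - q->cached_cons >= q->size)
			return false;
	} while (!atomic_compare_exchange_weak_explicit(&q->cached_prod,
							&old, old + 1,
							memory_order_relaxed,
							memory_order_relaxed));
	return true;
}

Contended producers now retry a single CAS instead of spinning on a
lock, which is presumably where the ~5% gain quoted above comes from.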
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index 92a2358c6ce3..0b1abdb99c9e 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -90,11 +90,6 @@ struct xsk_buff_pool {
 	 * destructor callback.
 	 */
 	spinlock_t cq_prod_lock;
-	/* Mutual exclusion of the completion ring in the SKB mode.
-	 * Protect: when sockets share a single cq when the same netdev
-	 * and queue id is shared.
-	 */
-	spinlock_t cq_cached_prod_lock;
 	struct xdp_buff_xsk *free_heads[];
 };
 
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 4e95b894f218..6b99a7eeb952 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -548,13 +548,7 @@ static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
 
 static int xsk_cq_reserve_locked(struct xsk_buff_pool *pool)
 {
-	int ret;
-
-	spin_lock(&pool->cq_cached_prod_lock);
-	ret = xskq_prod_reserve(pool->cq, false);
-	spin_unlock(&pool->cq_cached_prod_lock);
-
-	return ret;
+	return xskq_prod_reserve(pool->cq, true);
 }
 
 static void xsk_cq_submit_addr_locked(struct xsk_buff_pool *pool,
@@ -587,9 +581,7 @@ static void xsk_cq_submit_addr_locked(struct xsk_buff_pool *pool,
 
 static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n)
 {
-	spin_lock(&pool->cq_cached_prod_lock);
-	xskq_prod_cancel_n(pool->cq, n, false);
-	spin_unlock(&pool->cq_cached_prod_lock);
+	xskq_prod_cancel_n(pool->cq, n, true);
 }
 
 static void xsk_inc_num_desc(struct sk_buff *skb)
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 51526034c42a..9539f121b290 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -91,7 +91,6 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
 	INIT_LIST_HEAD(&pool->xsk_tx_list);
 	spin_lock_init(&pool->xsk_tx_list_lock);
 	spin_lock_init(&pool->cq_prod_lock);
-	spin_lock_init(&pool->cq_cached_prod_lock);
 	refcount_set(&pool->users, 1);
 
 	pool->fq = xs->fq_tmp;
--
2.41.3
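
The cancel path follows the same pattern. Under the same illustrative
assumptions as the sketch above (hypothetical names, not the real
xskq_prod_cancel_n()), backing out n reservations needs no lock at
all, because the reserved slots were never published to the consumer:

/* Rewind n reservations on the hypothetical ring above. A blind
 * atomic subtraction is only safe when the cancelled slots are the
 * most recently reserved ones, which matches how the generic xmit
 * error path backs out the reservation it has just made.
 */
static void fake_prod_cancel_n(struct fake_prod_ring *q, uint32_t n)
{
	atomic_fetch_sub_explicit(&q->cached_prod, n,
				  memory_order_relaxed);
}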