Message-Id: <20251125085431.4039-4-kerneljasonxing@gmail.com>
Date: Tue, 25 Nov 2025 16:54:31 +0800
From: Jason Xing <kerneljasonxing@...il.com>
To: davem@...emloft.net,
edumazet@...gle.com,
kuba@...nel.org,
pabeni@...hat.com,
bjorn@...nel.org,
magnus.karlsson@...el.com,
maciej.fijalkowski@...el.com,
jonathan.lemon@...il.com,
sdf@...ichev.me,
ast@...nel.org,
daniel@...earbox.net,
hawk@...nel.org,
john.fastabend@...il.com
Cc: bpf@...r.kernel.org,
netdev@...r.kernel.org,
Jason Xing <kernelxing@...cent.com>
Subject: [PATCH net-next v2 3/3] xsk: remove spin lock protection of cached_prod

From: Jason Xing <kernelxing@...cent.com>

Remove the spin lock protection and adjust the related helpers
accordingly. cached_prod is now fully converted to an atomic, which
improves performance by around 5% across different platforms.

Signed-off-by: Jason Xing <kernelxing@...cent.com>
---
include/net/xsk_buff_pool.h | 5 -----
net/xdp/xsk.c | 21 ++++-----------------
net/xdp/xsk_buff_pool.c | 1 -
3 files changed, 4 insertions(+), 23 deletions(-)
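
For reviewers, a minimal sketch of the lock-free reserve/cancel pairing
that makes dropping cq_cached_prod_lock safe is shown below. It is only
an illustration: the helper name xsk_cq_cached_prod_reserve() and the
field cached_prod_atomic come from the callers in this diff, but the
ring layout (nentries, cached_cons) and the exact implementation are
assumptions; the real conversion of cached_prod to an atomic lives in
the earlier patches of this series.

/* Illustrative only: speculatively claim one completion ring slot on
 * the atomic cached_prod and back the claim out if the ring turned out
 * to be full. The cancel path in this patch
 * (xsk_cq_cached_prod_cancel()) is the same atomic_sub() used for the
 * failure case here.
 */
static int xsk_cq_cached_prod_reserve(struct xsk_queue *q)
{
	u32 prod = atomic_inc_return(&q->cached_prod_atomic);

	/* More entries claimed than the ring can hold: undo and fail. */
	if (prod - q->cached_cons > q->nentries) {
		atomic_dec(&q->cached_prod_atomic);
		return -ENOSPC;
	}

	return 0;
}

With a reserve like that, the only remaining serialization on the
completion queue is cq_prod_lock around the actual submit in
xsk_cq_submit_addr_locked(), which this patch keeps.
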
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index 92a2358c6ce3..0b1abdb99c9e 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -90,11 +90,6 @@ struct xsk_buff_pool {
* destructor callback.
*/
spinlock_t cq_prod_lock;
- /* Mutual exclusion of the completion ring in the SKB mode.
- * Protect: when sockets share a single cq when the same netdev
- * and queue id is shared.
- */
- spinlock_t cq_cached_prod_lock;
struct xdp_buff_xsk *free_heads[];
};
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index b63409b1422e..ae8a92c168b8 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -546,17 +546,6 @@ static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
}
-static int xsk_cq_reserve_locked(struct xsk_buff_pool *pool)
-{
- int ret;
-
- spin_lock(&pool->cq_cached_prod_lock);
- ret = xsk_cq_cached_prod_reserve(pool->cq);
- spin_unlock(&pool->cq_cached_prod_lock);
-
- return ret;
-}
-
static void xsk_cq_submit_addr_locked(struct xsk_buff_pool *pool,
struct sk_buff *skb)
{
@@ -585,11 +574,9 @@ static void xsk_cq_submit_addr_locked(struct xsk_buff_pool *pool,
spin_unlock_irqrestore(&pool->cq_prod_lock, flags);
}
-static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n)
+static void xsk_cq_cached_prod_cancel(struct xsk_buff_pool *pool, u32 n)
{
- spin_lock(&pool->cq_cached_prod_lock);
atomic_sub(n, &pool->cq->cached_prod_atomic);
- spin_unlock(&pool->cq_cached_prod_lock);
}
static void xsk_inc_num_desc(struct sk_buff *skb)
@@ -643,7 +630,7 @@ static void xsk_consume_skb(struct sk_buff *skb)
}
skb->destructor = sock_wfree;
- xsk_cq_cancel_locked(xs->pool, num_descs);
+ xsk_cq_cached_prod_cancel(xs->pool, num_descs);
/* Free skb without triggering the perf drop trace */
consume_skb(skb);
xs->skb = NULL;
@@ -860,7 +847,7 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
xskq_cons_release(xs->tx);
} else {
/* Let application retry */
- xsk_cq_cancel_locked(xs->pool, 1);
+ xsk_cq_cached_prod_cancel(xs->pool, 1);
}
return ERR_PTR(err);
@@ -898,7 +885,7 @@ static int __xsk_generic_xmit(struct sock *sk)
* if there is space in it. This avoids having to implement
* any buffering in the Tx path.
*/
- err = xsk_cq_reserve_locked(xs->pool);
+ err = xsk_cq_cached_prod_reserve(xs->pool->cq);
if (err) {
err = -EAGAIN;
goto out;
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 51526034c42a..9539f121b290 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -91,7 +91,6 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
INIT_LIST_HEAD(&pool->xsk_tx_list);
spin_lock_init(&pool->xsk_tx_list_lock);
spin_lock_init(&pool->cq_prod_lock);
- spin_lock_init(&pool->cq_cached_prod_lock);
refcount_set(&pool->users, 1);
pool->fq = xs->fq_tmp;
--
--
2.41.3