Message-ID: <aa967b683e189558babce55288dd4e1e6cad8687.1758030357.git.asml.silence@gmail.com>
Date: Tue, 16 Sep 2025 15:27:59 +0100
From: Pavel Begunkov <asml.silence@...il.com>
To: io-uring@...r.kernel.org
Cc: asml.silence@...il.com,
axboe@...nel.dk,
netdev@...r.kernel.org
Subject: [PATCH io_uring for-6.18 16/20] io_uring/zcrx: use guards for the refill lock

Use guards for rq_lock in io_zcrx_ring_refill(); it makes the function
a tad simpler.

Signed-off-by: Pavel Begunkov <asml.silence@...il.com>
---
 io_uring/zcrx.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

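For reviewers unfamiliar with the guard infrastructure: guard() comes
from include/linux/cleanup.h and is built on the compiler's cleanup
attribute, so the lock is dropped automatically on every scope exit,
including the early return in the first hunk below; the spinlock guard
classes are declared via DEFINE_LOCK_GUARD_1. As a minimal userspace
sketch of the same pattern, assuming nothing but pthreads (the my_*
names are illustrative, not any kernel API):

	#include <pthread.h>
	#include <stdio.h>

	typedef pthread_mutex_t my_lock_t;

	/* Cleanup callback: receives a pointer to the guarded variable. */
	static void my_unlock(my_lock_t **l)
	{
		pthread_mutex_unlock(*l);
	}

	/* Take the lock and schedule the unlock for scope exit. */
	#define my_guard(lock)						\
		my_lock_t *guard_ptr					\
			__attribute__((cleanup(my_unlock))) = (lock);	\
		pthread_mutex_lock(guard_ptr)

	static my_lock_t lock = PTHREAD_MUTEX_INITIALIZER;

	static void refill(int entries)
	{
		my_guard(&lock);

		if (!entries)
			return;	/* unlock runs here automatically */

		printf("refilling %d entries\n", entries);
	}			/* ...and here on the normal path */

	int main(void)
	{
		refill(0);
		refill(4);
		return 0;
	}

The point of the sketch is the two exit paths: neither needs an
explicit unlock, which is exactly what the conversion below buys.
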
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index 5f99fc7b43ee..630b19ebb47e 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -756,14 +756,12 @@ static void io_zcrx_ring_refill(struct page_pool *pp,
 	unsigned int mask = ifq->rq_entries - 1;
 	unsigned int entries;
 
-	spin_lock_bh(&ifq->rq_lock);
+	guard(spinlock_bh)(&ifq->rq_lock);
 
 	entries = io_zcrx_rqring_entries(ifq);
 	entries = min_t(unsigned, entries, PP_ALLOC_CACHE_REFILL - pp->alloc.count);
-	if (unlikely(!entries)) {
-		spin_unlock_bh(&ifq->rq_lock);
+	if (unlikely(!entries))
 		return;
-	}
 
 	do {
 		struct io_uring_zcrx_rqe *rqe = io_zcrx_get_rqe(ifq, mask);
@@ -801,7 +799,6 @@ static void io_zcrx_ring_refill(struct page_pool *pp,
 	} while (--entries);
 
 	smp_store_release(&ifq->rq_ring->head, ifq->cached_rq_head);
-	spin_unlock_bh(&ifq->rq_lock);
 }
 
 static void io_zcrx_refill_slow(struct page_pool *pp, struct io_zcrx_ifq *ifq)
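
For clarity, this is roughly what the refill path looks like with the
patch applied (reconstructed from the two hunks above; the per-entry
refill work in the loop body is elided):

	static void io_zcrx_ring_refill(struct page_pool *pp,
					struct io_zcrx_ifq *ifq)
	{
		unsigned int mask = ifq->rq_entries - 1;
		unsigned int entries;

		guard(spinlock_bh)(&ifq->rq_lock);

		entries = io_zcrx_rqring_entries(ifq);
		entries = min_t(unsigned, entries, PP_ALLOC_CACHE_REFILL - pp->alloc.count);
		if (unlikely(!entries))
			return;

		do {
			struct io_uring_zcrx_rqe *rqe = io_zcrx_get_rqe(ifq, mask);

			/* ... per-entry refill work elided ... */
		} while (--entries);

		smp_store_release(&ifq->rq_ring->head, ifq->cached_rq_head);
	}

With the guard in place the function has a single locking statement
and no unlock bookkeeping, so early returns like the !entries one stay
correct by construction.
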
--
2.49.0