Message-ID: <9c14eb58088a746ddf3a7fd3ae8d4498dfa36ed4.1758030357.git.asml.silence@gmail.com>
Date: Tue, 16 Sep 2025 15:27:44 +0100
From: Pavel Begunkov <asml.silence@...il.com>
To: io-uring@...r.kernel.org
Cc: asml.silence@...il.com,
axboe@...nel.dk,
netdev@...r.kernel.org
Subject: [PATCH io_uring for-6.18 01/20] io_uring/zcrx: improve rqe cache alignment
Refill queue entries are 16B structures, but because of the ring header
placement they are only 8B aligned rather than naturally (16B) aligned,
which means some of them straddle two cache lines. Push the rqes out to
a new cache line.
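
As a side note, here is a minimal userspace sketch of the alignment math
(not the kernel code); the 8B ring header, 16B rqes and 64B cache lines
are assumptions matching the sizes implied above:

	#include <stdint.h>
	#include <stdio.h>

	#define L1_CACHE_BYTES	64
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

	struct ring_hdr {	/* stand-in for struct io_uring, 8B */
		uint32_t head;
		uint32_t tail;
	};

	struct rqe {		/* stand-in for struct io_uring_zcrx_rqe, 16B */
		uint64_t off;
		uint32_t len;
		uint32_t __pad;
	};

	int main(void)
	{
		size_t old_off = sizeof(struct ring_hdr);		/* 8  */
		size_t new_off = ALIGN(old_off, L1_CACHE_BYTES);	/* 64 */

		/* With an 8B offset, rqe[3] starts at byte 56 and ends at
		 * byte 71, crossing the 64B cache line boundary. */
		size_t rqe3 = old_off + 3 * sizeof(struct rqe);
		printf("unaligned: rqe[3] spans lines %zu and %zu\n",
		       rqe3 / L1_CACHE_BYTES, (rqe3 + 15) / L1_CACHE_BYTES);

		/* After aligning the offset, every 16B rqe fits inside a
		 * single 64B line (4 rqes per line). */
		size_t rqe3a = new_off + 3 * sizeof(struct rqe);
		printf("aligned:   rqe[3] spans lines %zu and %zu\n",
		       rqe3a / L1_CACHE_BYTES, (rqe3a + 15) / L1_CACHE_BYTES);
		return 0;
	}
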
Signed-off-by: Pavel Begunkov <asml.silence@...il.com>
---
io_uring/zcrx.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/io_uring/zcrx.c b/io_uring/zcrx.c
index 51fd2350dbe9..c02045e4c1b6 100644
--- a/io_uring/zcrx.c
+++ b/io_uring/zcrx.c
@@ -352,7 +352,7 @@ static int io_allocate_rbuf_ring(struct io_zcrx_ifq *ifq,
void *ptr;
int ret;
- off = sizeof(struct io_uring);
+ off = ALIGN(sizeof(struct io_uring), L1_CACHE_BYTES);
size = off + sizeof(struct io_uring_zcrx_rqe) * reg->rq_entries;
if (size > rd->size)
return -EINVAL;
@@ -367,6 +367,10 @@ static int io_allocate_rbuf_ring(struct io_zcrx_ifq *ifq,
ptr = io_region_get_ptr(&ifq->region);
ifq->rq_ring = (struct io_uring *)ptr;
ifq->rqes = (struct io_uring_zcrx_rqe *)(ptr + off);
+
+ reg->offsets.head = offsetof(struct io_uring, head);
+ reg->offsets.tail = offsetof(struct io_uring, tail);
+ reg->offsets.rqes = off;
return 0;
}
@@ -618,9 +622,6 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
goto err;
ifq->if_rxq = reg.if_rxq;
- reg.offsets.rqes = sizeof(struct io_uring);
- reg.offsets.head = offsetof(struct io_uring, head);
- reg.offsets.tail = offsetof(struct io_uring, tail);
reg.zcrx_id = id;
scoped_guard(mutex, &ctx->mmap_lock) {
--
2.49.0