Message-Id: <20231107214045.2172393-15-dw@davidwei.uk>
Date: Tue, 7 Nov 2023 13:40:39 -0800
From: David Wei <dw@...idwei.uk>
To: io-uring@...r.kernel.org,
netdev@...r.kernel.org
Cc: Jens Axboe <axboe@...nel.dk>,
Pavel Begunkov <asml.silence@...il.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jesper Dangaard Brouer <hawk@...nel.org>,
David Ahern <dsahern@...nel.org>,
Mina Almasry <almasrymina@...gle.com>,
Willem de Bruijn <willemdebruijn.kernel@...il.com>,
Dragos Tatulea <dtatulea@...dia.com>
Subject: [PATCH 14/20] io_uring/zcrx: introduce io_zc_get_rbuf_cqe

From: Pavel Begunkov <asml.silence@...il.com>

Add a simple helper for grabbing a new rbuf entry. It greatly helps
zc_rx_recv_frag()'s readability and will be reused later.

Signed-off-by: Pavel Begunkov <asml.silence@...il.com>
Signed-off-by: David Wei <dw@...idwei.uk>
---
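Not part of the patch itself: below is a small userspace sketch of the
ring accounting the new helper performs, for reviewers who want to poke
at the index math. It assumes cq_entries is a power of two, so that
(cq_entries - 1) acts as an index mask, and it models the
io_zc_rx_cqring_entries() read as a plain tail - head difference (no
smp_rmb() needed in single-threaded userspace). All demo_* names are
made up for illustration.

#include <stdio.h>

struct demo_ifq {
	unsigned int cq_entries;	/* ring size, power of two */
	unsigned int cached_cq_tail;	/* producer position */
	unsigned int cq_head;	/* consumer position */
};

/* Mirrors io_zc_get_rbuf_cqe(): return the slot to fill, or -1 if full. */
static int demo_get_rbuf_slot(struct demo_ifq *ifq)
{
	unsigned int mask = ifq->cq_entries - 1;
	unsigned int cq_idx = ifq->cached_cq_tail & mask;
	unsigned int queued = ifq->cached_cq_tail - ifq->cq_head;
	unsigned int free = ifq->cq_entries - queued;
	/* also clamp to the contiguous space before the ring wraps */
	unsigned int contig = ifq->cq_entries - cq_idx;
	unsigned int entries = free < contig ? free : contig;

	if (!entries)
		return -1;
	ifq->cached_cq_tail++;
	return cq_idx;
}

int main(void)
{
	/* ring of 4, three entries queued: exactly one slot left */
	struct demo_ifq ifq = { .cq_entries = 4, .cached_cq_tail = 7,
				.cq_head = 4 };
	int slot = demo_get_rbuf_slot(&ifq);

	printf("slot %d, new tail %u\n", slot, ifq.cached_cq_tail);
	/* prints "slot 3, new tail 8"; a second call now returns -1 */
	return 0;
}

Factoring this out of zc_rx_recv_frag() also gives the caller a single
failure point (-ENOBUFS when no slot is free), which should make the
promised later reuse straightforward.
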
io_uring/zc_rx.c | 36 ++++++++++++++++++++++++------------
 io_uring/zc_rx.c | 36 ++++++++++++++++++++++++------------
 1 file changed, 24 insertions(+), 12 deletions(-)

diff --git a/io_uring/zc_rx.c b/io_uring/zc_rx.c
index 038692d3265e..c1502ec3e629 100644
--- a/io_uring/zc_rx.c
+++ b/io_uring/zc_rx.c
@@ -576,31 +576,43 @@ static struct io_zc_rx_ifq *io_zc_rx_ifq_skb(struct sk_buff *skb)
return NULL;
}

+static inline struct io_uring_rbuf_cqe *io_zc_get_rbuf_cqe(struct io_zc_rx_ifq *ifq)
+{
+ struct io_uring_rbuf_cqe *cqe;
+ unsigned int cq_idx, queued, free, entries;
+ unsigned int mask = ifq->cq_entries - 1;
+
+ cq_idx = ifq->cached_cq_tail & mask;
+ smp_rmb();
+ queued = min(io_zc_rx_cqring_entries(ifq), ifq->cq_entries);
+ free = ifq->cq_entries - queued;
+ entries = min(free, ifq->cq_entries - cq_idx);
+ if (!entries)
+ return NULL;
+
+ cqe = &ifq->cqes[cq_idx];
+ ifq->cached_cq_tail++;
+ return cqe;
+}
+
static int zc_rx_recv_frag(struct io_zc_rx_ifq *ifq, const skb_frag_t *frag,
int off, int len, bool zc_skb)
{
struct io_uring_rbuf_cqe *cqe;
- unsigned int cq_idx, queued, free, entries;
struct page *page;
- unsigned int mask;
u32 pgid;

page = skb_frag_page(frag);
off += skb_frag_off(frag);

if (likely(zc_skb && is_zc_rx_page(page))) {
- mask = ifq->cq_entries - 1;
+ cqe = io_zc_get_rbuf_cqe(ifq);
+ if (!cqe)
+ return -ENOBUFS;
+
pgid = page_private(page) & 0xffffffff;
io_zc_rx_get_buf_uref(ifq->pool, pgid);
- cq_idx = ifq->cached_cq_tail & mask;
- smp_rmb();
- queued = min(io_zc_rx_cqring_entries(ifq), ifq->cq_entries);
- free = ifq->cq_entries - queued;
- entries = min(free, ifq->cq_entries - cq_idx);
- if (!entries)
- return -ENOBUFS;
- cqe = &ifq->cqes[cq_idx];
- ifq->cached_cq_tail++;
+
cqe->region = 0;
cqe->off = pgid * PAGE_SIZE + off;
cqe->len = len;
--
2.39.3