[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20231107214045.2172393-12-dw@davidwei.uk>
Date: Tue, 7 Nov 2023 13:40:36 -0800
From: David Wei <dw@...idwei.uk>
To: io-uring@...r.kernel.org,
netdev@...r.kernel.org
Cc: Jens Axboe <axboe@...nel.dk>,
Pavel Begunkov <asml.silence@...il.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jesper Dangaard Brouer <hawk@...nel.org>,
David Ahern <dsahern@...nel.org>,
Mina Almasry <almasrymina@...gle.com>,
Willem de Bruijn <willemdebruijn.kernel@...il.com>,
Dragos Tatulea <dtatulea@...dia.com>
Subject: [PATCH 11/20] net: add data pool
Add a struct data_pool that holds both a page_pool and an ifq (which in
turn provides access to a ZC pool).
Each hardware Rx queue configured for ZC will have one data_pool, set in
its struct netdev_rx_queue. Payload hardware Rx queues are filled from
the ZC pool, while header Rx queues are filled from the page_pool as
normal.
Co-developed-by: Pavel Begunkov <asml.silence@...il.com>
Signed-off-by: Pavel Begunkov <asml.silence@...il.com>
Signed-off-by: David Wei <dw@...idwei.uk>
---
include/net/data_pool.h | 74 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 74 insertions(+)
create mode 100644 include/net/data_pool.h
diff --git a/include/net/data_pool.h b/include/net/data_pool.h
new file mode 100644
index 000000000000..bf2dff23724a
--- /dev/null
+++ b/include/net/data_pool.h
@@ -0,0 +1,74 @@
+#ifndef _DATA_POOL_H
+#define _DATA_POOL_H
+
+#include <linux/io_uring.h>
+#include <linux/io_uring_types.h>
+#include <linux/mm_types.h>
+#include <linux/netdevice.h>
+#include <net/page_pool/helpers.h>
+
+/*
+ * data_pool - per-RX-queue source of packet buffers.
+ *
+ * Wraps either a plain page_pool or, when zero-copy is configured, an
+ * io_uring ZC ifq.  When @zc_ifq is non-NULL, payload buffers come from
+ * the ZC pool; header buffers are still served from @page_pool.
+ */
+struct data_pool {
+	struct page_pool	*page_pool;
+	/* non-NULL iff this queue is configured for io_uring zero-copy Rx */
+	struct io_zc_rx_ifq	*zc_ifq;
+	/* ubuf_info used to tag ZC completions for this queue */
+	struct ubuf_info	*zc_uarg;
+};
+
+/*
+ * Allocate one page from the pool.
+ *
+ * In ZC mode the page is taken from the io_uring ZC buffer pool;
+ * otherwise it comes from the regular page_pool.  Returns NULL when the
+ * backing pool is exhausted.
+ */
+static inline struct page *data_pool_alloc_page(struct data_pool *dp)
+{
+	struct io_zc_rx_buf *buf;
+
+	if (!dp->zc_ifq)
+		return page_pool_dev_alloc_pages(dp->page_pool);
+
+	buf = io_zc_rx_get_buf(dp->zc_ifq);
+	return buf ? buf->page : NULL;
+}
+
+/*
+ * Set the reference bias on @page so it can be split into multiple
+ * fragments.  For ZC buffers the bias is stored in the buf's own
+ * refcount; for page_pool pages it is delegated to
+ * page_pool_fragment_page().
+ *
+ * NOTE(review): buf->refcount is an atomic_t (int) while @bias is
+ * unsigned long — atomic_set() silently truncates on 64-bit.  Callers
+ * presumably never pass a bias above INT_MAX; confirm, or narrow the
+ * parameter type.
+ */
+static inline void data_pool_fragment_page(struct data_pool *dp,
+					   struct page *page,
+					   unsigned long bias)
+{
+	if (dp->zc_ifq) {
+		struct io_zc_rx_buf *buf;
+
+		buf = io_zc_rx_buf_from_page(dp->zc_ifq, page);
+		atomic_set(&buf->refcount, bias);
+	} else {
+		page_pool_fragment_page(page, bias);
+	}
+}
+
+/*
+ * Return @page to the pool it came from.
+ *
+ * In ZC mode a queue can hold a mix of ZC payload buffers and ordinary
+ * page_pool (header) pages, so the page is first looked up in the ifq:
+ * if io_zc_rx_buf_from_page() finds no matching buf, the page must be a
+ * page_pool page and is recycled there instead.
+ */
+static inline void data_pool_put_page(struct data_pool *dp, struct page *page)
+{
+	if (dp->zc_ifq) {
+		struct io_zc_rx_buf *buf;
+
+		buf = io_zc_rx_buf_from_page(dp->zc_ifq, page);
+		if (!buf)
+			page_pool_recycle_direct(dp->page_pool, page);
+		else
+			io_zc_rx_put_buf(dp->zc_ifq, buf);
+	} else {
+		/* catch pages that never came from a page_pool */
+		WARN_ON_ONCE(page->pp_magic != PP_SIGNATURE);
+
+		page_pool_recycle_direct(dp->page_pool, page);
+	}
+}
+
+/*
+ * Return the DMA address of @page.
+ *
+ * Mirrors data_pool_put_page(): in ZC mode the page may be either a ZC
+ * payload buffer or an ordinary page_pool (header) page, so the ifq
+ * lookup result must be checked — the original code dereferenced the
+ * io_zc_rx_buf_from_page() result unconditionally and would NULL-deref
+ * on a page_pool page.  Fall back to page_pool_get_dma_addr() for pages
+ * that do not belong to the ZC pool.
+ */
+static inline dma_addr_t data_pool_get_dma_addr(struct data_pool *dp,
+						struct page *page)
+{
+	if (dp->zc_ifq) {
+		struct io_zc_rx_buf *buf;
+
+		buf = io_zc_rx_buf_from_page(dp->zc_ifq, page);
+		if (buf)
+			return io_zc_rx_buf_dma(buf);
+	}
+	return page_pool_get_dma_addr(page);
+}
+
+#endif
--
2.39.3
Powered by blists - more mailing lists