Message-ID: <20241007221603.1703699-8-dw@davidwei.uk>
Date: Mon, 7 Oct 2024 15:15:55 -0700
From: David Wei <dw@...idwei.uk>
To: io-uring@...r.kernel.org,
netdev@...r.kernel.org
Cc: David Wei <dw@...idwei.uk>,
Jens Axboe <axboe@...nel.dk>,
Pavel Begunkov <asml.silence@...il.com>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
"David S. Miller" <davem@...emloft.net>,
Eric Dumazet <edumazet@...gle.com>,
Jesper Dangaard Brouer <hawk@...nel.org>,
David Ahern <dsahern@...nel.org>,
Mina Almasry <almasrymina@...gle.com>
Subject: [PATCH v1 07/15] net: page pool: add helper creating area from pages
From: Pavel Begunkov <asml.silence@...il.com>

Add a helper that takes an array of pages and initialises the passed-in
memory provider's area with them, where each net_iov takes one page. It
is also responsible for setting up the DMA mappings.

We keep it in page_pool.c so as not to leak netmem details to outside
providers such as io_uring, which don't have access to netmem_priv.h
and other private helpers.

Signed-off-by: Pavel Begunkov <asml.silence@...il.com>
Signed-off-by: David Wei <dw@...idwei.uk>
---
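As a rough caller-side sketch (for illustration only; the my_provider_*
names and the surrounding setup are assumptions, not io_uring's actual
code), a memory provider with a populated net_iov_area and an array of
pinned pages might use the pair of helpers like this:

	static int my_provider_setup(struct page_pool *pool,
				     struct net_iov_area *area,
				     struct page **pages)
	{
		/* Sets pp info on each net_iov and DMA-maps its page;
		 * returns -EOPNOTSUPP if the pool doesn't DMA-map at all.
		 */
		return page_pool_init_paged_area(pool, area, pages);
	}

	static void my_provider_free(struct page_pool *pool,
				     struct net_iov_area *area)
	{
		/* Undoes the DMA mappings taken at setup time. */
		page_pool_release_area(pool, area);
	}
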
 include/net/page_pool/types.h | 17 ++++++++++
 net/core/page_pool.c          | 61 +++++++++++++++++++++++++++++++++--
 2 files changed, 76 insertions(+), 2 deletions(-)

diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index fd0376ad0d26..1180ad07423c 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -271,6 +271,11 @@ void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
 			   const struct xdp_mem_info *mem);
 void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 			     int count);
+
+int page_pool_init_paged_area(struct page_pool *pool,
+			      struct net_iov_area *area, struct page **pages);
+void page_pool_release_area(struct page_pool *pool,
+			    struct net_iov_area *area);
 #else
 static inline void page_pool_destroy(struct page_pool *pool)
 {
@@ -286,6 +291,18 @@ static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 					   int count)
 {
 }
+
+static inline int page_pool_init_paged_area(struct page_pool *pool,
+					    struct net_iov_area *area,
+					    struct page **pages)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline void page_pool_release_area(struct page_pool *pool,
+					  struct net_iov_area *area)
+{
+}
 #endif
 
 void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem,
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 9a675e16e6a4..112b6fe4b7ff 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -459,7 +459,8 @@ page_pool_dma_sync_for_device(const struct page_pool *pool,
 		__page_pool_dma_sync_for_device(pool, netmem, dma_sync_size);
 }
 
-static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem)
+static bool page_pool_dma_map_page(struct page_pool *pool, netmem_ref netmem,
+				   struct page *page)
 {
 	dma_addr_t dma;
 
@@ -468,7 +469,7 @@ static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem)
 	 * into page private data (i.e 32bit cpu with 64bit DMA caps)
 	 * This mapping is kept for lifetime of page, until leaving pool.
 	 */
-	dma = dma_map_page_attrs(pool->p.dev, netmem_to_page(netmem), 0,
+	dma = dma_map_page_attrs(pool->p.dev, page, 0,
 				 (PAGE_SIZE << pool->p.order), pool->p.dma_dir,
 				 DMA_ATTR_SKIP_CPU_SYNC |
 				 DMA_ATTR_WEAK_ORDERING);
@@ -490,6 +491,11 @@ static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem)
 	return false;
 }
 
+static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem)
+{
+	return page_pool_dma_map_page(pool, netmem, netmem_to_page(netmem));
+}
+
 static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
 						 gfp_t gfp)
 {
@@ -1154,3 +1160,54 @@ void page_pool_update_nid(struct page_pool *pool, int new_nid)
 	}
 }
 EXPORT_SYMBOL(page_pool_update_nid);
+
+static void page_pool_release_page_dma(struct page_pool *pool,
+				       netmem_ref netmem)
+{
+	__page_pool_release_page_dma(pool, netmem);
+}
+
+int page_pool_init_paged_area(struct page_pool *pool,
+			      struct net_iov_area *area, struct page **pages)
+{
+	struct net_iov *niov;
+	netmem_ref netmem;
+	int i, ret = 0;
+
+	if (!pool->dma_map)
+		return -EOPNOTSUPP;
+
+	for (i = 0; i < area->num_niovs; i++) {
+		niov = &area->niovs[i];
+		netmem = net_iov_to_netmem(niov);
+
+		page_pool_set_pp_info(pool, netmem);
+		if (!page_pool_dma_map_page(pool, netmem, pages[i])) {
+			ret = -EINVAL;
+			goto err_unmap_dma;
+		}
+	}
+	return 0;
+
+err_unmap_dma:
+	while (i--) {
+		netmem = net_iov_to_netmem(&area->niovs[i]);
+		page_pool_release_page_dma(pool, netmem);
+	}
+	return ret;
+}
+
+void page_pool_release_area(struct page_pool *pool,
+			    struct net_iov_area *area)
+{
+	int i;
+
+	if (!pool->dma_map)
+		return;
+
+	for (i = 0; i < area->num_niovs; i++) {
+		struct net_iov *niov = &area->niovs[i];
+
+		page_pool_release_page_dma(pool, net_iov_to_netmem(niov));
+	}
+}
--
2.43.5