Message-Id: <20230111042214.907030-13-willy@infradead.org>
Date: Wed, 11 Jan 2023 04:22:00 +0000
From: "Matthew Wilcox (Oracle)" <willy@...radead.org>
To: Jesper Dangaard Brouer <hawk@...nel.org>,
Ilias Apalodimas <ilias.apalodimas@...aro.org>
Cc: "Matthew Wilcox (Oracle)" <willy@...radead.org>,
netdev@...r.kernel.org, linux-mm@...ck.org,
Shakeel Butt <shakeelb@...gle.com>,
Jesper Dangaard Brouer <brouer@...hat.com>,
Jesse Brandeburg <jesse.brandeburg@...el.com>
Subject: [PATCH v3 12/26] page_pool: Convert page_pool_alloc_pages() to page_pool_alloc_netmem()

Rename page_pool_alloc_pages() to page_pool_alloc_netmem(), returning a
netmem instead of a page, and add a page_pool_dev_alloc_netmem() wrapper.
page_pool_alloc_pages() is kept as a compat wrapper until all callers
are converted. Also convert __page_pool_alloc_pages_slow() to
__page_pool_alloc_netmem_slow() and __page_pool_alloc_page_order() to
__page_pool_alloc_netmem(). __page_pool_get_cached() now returns
a netmem.
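As an illustration only (not part of this patch), a driver RX refill
helper using the new entry point might look like the sketch below;
rx_refill_one() is a made-up name, and netmem_page() comes from earlier
in this series:

	static struct page *rx_refill_one(struct page_pool *pool)
	{
		struct netmem *nmem;

		/* Same gfp flags page_pool_dev_alloc_pages() uses */
		nmem = page_pool_dev_alloc_netmem(pool);
		if (!nmem)
			return NULL;

		/* Unconverted code can still recover the head page */
		return netmem_page(nmem);
	}
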
Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
Acked-by: Jesper Dangaard Brouer <brouer@...hat.com>
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@...aro.org>
Reviewed-by: Jesse Brandeburg <jesse.brandeburg@...el.com>
---
include/net/page_pool.h | 16 ++++++++++++++--
net/core/page_pool.c | 39 +++++++++++++++++++--------------------
2 files changed, 33 insertions(+), 22 deletions(-)
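Illustration for reviewers (below the fold, so not applied by git am):
an unconverted caller is unaffected by this change. For example,

	page = page_pool_dev_alloc_pages(pool);

now expands through the compat wrapper to

	page = netmem_page(page_pool_alloc_netmem(pool,
						  GFP_ATOMIC | __GFP_NOWARN));

so existing drivers keep compiling and behaving as before.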
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 60354e771fdd..a568d94043af 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -13,7 +13,7 @@
* regular page allocator APIs.
*
* Basic use involve replacing alloc_pages() calls with the
- * page_pool_alloc_pages() call. Drivers should likely use
+ * page_pool_alloc_netmem() call. Drivers should likely use
* page_pool_dev_alloc_pages() replacing dev_alloc_pages().
*
* API keeps track of in-flight pages, in-order to let API user know
@@ -314,7 +314,19 @@ struct page_pool {
u64 destroy_cnt;
};
-struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
+struct netmem *page_pool_alloc_netmem(struct page_pool *pool, gfp_t gfp);
+
+static inline struct netmem *page_pool_dev_alloc_netmem(struct page_pool *pool)
+{
+ return page_pool_alloc_netmem(pool, GFP_ATOMIC | __GFP_NOWARN);
+}
+
+/* Compat, remove when all users gone */
+static inline
+struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
+{
+ return netmem_page(page_pool_alloc_netmem(pool, gfp));
+}
static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
{
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 0212244e07e7..c7ea487acbaa 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -282,7 +282,7 @@ static struct netmem *page_pool_refill_alloc_cache(struct page_pool *pool)
}
/* fast path */
-static struct page *__page_pool_get_cached(struct page_pool *pool)
+static struct netmem *__page_pool_get_cached(struct page_pool *pool)
{
struct netmem *nmem;
@@ -295,7 +295,7 @@ static struct page *__page_pool_get_cached(struct page_pool *pool)
nmem = page_pool_refill_alloc_cache(pool);
}
- return netmem_page(nmem);
+ return nmem;
}
static void page_pool_dma_sync_for_device(struct page_pool *pool,
@@ -349,8 +349,8 @@ static void page_pool_clear_pp_info(struct netmem *nmem)
nmem->pp = NULL;
}
-static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
- gfp_t gfp)
+static
+struct netmem *__page_pool_alloc_netmem(struct page_pool *pool, gfp_t gfp)
{
struct netmem *nmem;
@@ -371,27 +371,27 @@ static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
/* Track how many pages are held 'in-flight' */
pool->pages_state_hold_cnt++;
trace_page_pool_state_hold(pool, nmem, pool->pages_state_hold_cnt);
- return netmem_page(nmem);
+ return nmem;
}
/* slow path */
noinline
-static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
+static struct netmem *__page_pool_alloc_netmem_slow(struct page_pool *pool,
gfp_t gfp)
{
const int bulk = PP_ALLOC_CACHE_REFILL;
unsigned int pp_flags = pool->p.flags;
unsigned int pp_order = pool->p.order;
- struct page *page;
+ struct netmem *nmem;
int i, nr_pages;
/* Don't support bulk alloc for high-order pages */
if (unlikely(pp_order))
- return __page_pool_alloc_page_order(pool, gfp);
+ return __page_pool_alloc_netmem(pool, gfp);
/* Unnecessary as alloc cache is empty, but guarantees zero count */
if (unlikely(pool->alloc.count > 0))
- return netmem_page(pool->alloc.cache[--pool->alloc.count]);
+ return pool->alloc.cache[--pool->alloc.count];
/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */
memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);
@@ -422,34 +422,33 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
/* Return last page */
if (likely(pool->alloc.count > 0)) {
- page = netmem_page(pool->alloc.cache[--pool->alloc.count]);
+ nmem = pool->alloc.cache[--pool->alloc.count];
alloc_stat_inc(pool, slow);
} else {
- page = NULL;
+ nmem = NULL;
}
/* When page just allocated it should have refcnt 1 (but may have
* speculative references) */
- return page;
+ return nmem;
}
/* For using page_pool replace: alloc_pages() API calls, but provide
* synchronization guarantee for allocation side.
*/
-struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
+struct netmem *page_pool_alloc_netmem(struct page_pool *pool, gfp_t gfp)
{
- struct page *page;
+ struct netmem *nmem;
/* Fast-path: Get a page from cache */
- page = __page_pool_get_cached(pool);
- if (page)
- return page;
+ nmem = __page_pool_get_cached(pool);
+ if (nmem)
+ return nmem;
/* Slow-path: cache empty, do real allocation */
- page = __page_pool_alloc_pages_slow(pool, gfp);
- return page;
+ return __page_pool_alloc_netmem_slow(pool, gfp);
}
-EXPORT_SYMBOL(page_pool_alloc_pages);
+EXPORT_SYMBOL(page_pool_alloc_netmem);
/* Calculate distance between two u32 values, valid if distance is below 2^(31)
* https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
--
2.35.1