Message-Id: <20250509115126.63190-6-byungchul@sk.com>
Date: Fri, 9 May 2025 20:51:12 +0900
From: Byungchul Park <byungchul@...com>
To: willy@...radead.org,
netdev@...r.kernel.org
Cc: linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
kernel_team@...ynix.com,
kuba@...nel.org,
almasrymina@...gle.com,
ilias.apalodimas@...aro.org,
harry.yoo@...cle.com,
hawk@...nel.org,
akpm@...ux-foundation.org,
ast@...nel.org,
daniel@...earbox.net,
davem@...emloft.net,
john.fastabend@...il.com,
andrew+netdev@...n.ch,
edumazet@...gle.com,
pabeni@...hat.com,
vishal.moola@...il.com
Subject: [RFC 05/19] page_pool: use netmem alloc/put API in __page_pool_alloc_pages_slow()

Use the netmem alloc/put API instead of the page alloc/put API in
__page_pool_alloc_pages_slow().

While at it, improve some comments.

Signed-off-by: Byungchul Park <byungchul@...com>
---
net/core/page_pool.c | 23 ++++++++++++-----------
1 file changed, 12 insertions(+), 11 deletions(-)
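
Note for reviewers: the conversion relies on netmem counterparts of the
page APIs removed below. The following is a minimal sketch of the shapes
this patch assumes for ordinary page-backed netmem (illustrative only;
the real helpers, introduced elsewhere in this series and in net core,
also have to handle net_iov-backed netmem):

	/* Sketch, not the actual implementation: for system memory a
	 * netmem_ref encodes a struct page pointer, so a bulk page
	 * allocation can fill the netmem array in place.
	 */
	static inline unsigned long alloc_netmems_bulk_node(gfp_t gfp,
						int nid, unsigned long nr,
						netmem_ref *netmems)
	{
		return alloc_pages_bulk_node(gfp, nid, nr,
					     (struct page **)netmems);
	}

	static inline void put_netmem(netmem_ref netmem)
	{
		/* For page-backed netmem, dropping a reference is
		 * put_page(); net_iov-backed netmem needs different
		 * handling, which this sketch leaves out.
		 */
		put_page(netmem_to_page(netmem));
	}

With helpers of that shape, the slow path can treat alloc.cache
uniformly as an array of netmem_ref while keeping today's page-backed
behaviour unchanged.
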
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index c03caa11fc606..57ad133e6dc8c 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -531,7 +531,7 @@ static noinline netmem_ref __page_pool_alloc_pages_slow(struct page_pool *pool,
unsigned int pp_order = pool->p.order;
bool dma_map = pool->dma_map;
netmem_ref netmem;
- int i, nr_pages;
+ int i, nr_netmems;

/* Don't support bulk alloc for high-order pages */
if (unlikely(pp_order))
@@ -541,21 +541,21 @@ static noinline netmem_ref __page_pool_alloc_pages_slow(struct page_pool *pool,
if (unlikely(pool->alloc.count > 0))
return pool->alloc.cache[--pool->alloc.count];

- /* Mark empty alloc.cache slots "empty" for alloc_pages_bulk */
+ /* Mark empty alloc.cache slots "empty" for alloc_netmems_bulk_node() */
memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);

- nr_pages = alloc_pages_bulk_node(gfp, pool->p.nid, bulk,
- (struct page **)pool->alloc.cache);
- if (unlikely(!nr_pages))
+ nr_netmems = alloc_netmems_bulk_node(gfp, pool->p.nid, bulk,
+ pool->alloc.cache);
+ if (unlikely(!nr_netmems))
return 0;

- /* Pages have been filled into alloc.cache array, but count is zero and
- * page element have not been (possibly) DMA mapped.
+ /* Netmems have been filled into the alloc.cache array, but the count
+ * is zero and the elements have not been DMA mapped yet (if needed).
*/
- for (i = 0; i < nr_pages; i++) {
+ for (i = 0; i < nr_netmems; i++) {
netmem = pool->alloc.cache[i];
if (dma_map && unlikely(!page_pool_dma_map(pool, netmem))) {
- put_page(netmem_to_page(netmem));
+ put_netmem(netmem);
continue;
}

@@ -567,7 +567,7 @@ static noinline netmem_ref __page_pool_alloc_pages_slow(struct page_pool *pool,
pool->pages_state_hold_cnt);
}

- /* Return last page */
+ /* Return the last netmem */
if (likely(pool->alloc.count > 0)) {
netmem = pool->alloc.cache[--pool->alloc.count];
alloc_stat_inc(pool, slow);
@@ -575,7 +575,8 @@ static noinline netmem_ref __page_pool_alloc_pages_slow(struct page_pool *pool,
netmem = 0;
}

- /* When page just alloc'ed is should/must have refcnt 1. */
+ /* When a netmem has just been allocated, it should/must have
+ * refcnt 1. */
return netmem;
}

--
2.17.1