[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20230529092840.40413-3-linyunsheng@huawei.com>
Date: Mon, 29 May 2023 17:28:39 +0800
From: Yunsheng Lin <linyunsheng@...wei.com>
To: <davem@...emloft.net>, <kuba@...nel.org>, <pabeni@...hat.com>
CC: <netdev@...r.kernel.org>, <linux-kernel@...r.kernel.org>, Yunsheng Lin
<linyunsheng@...wei.com>, Lorenzo Bianconi <lorenzo@...nel.org>, Alexander
Duyck <alexander.duyck@...il.com>, Jesper Dangaard Brouer <hawk@...nel.org>,
Ilias Apalodimas <ilias.apalodimas@...aro.org>, Eric Dumazet
<edumazet@...gle.com>
Subject: [PATCH net-next v2 2/3] page_pool: support non-frag page for page_pool_alloc_frag()
There is a performance penalty when using page frag support if the
user requests a larger frag size and a page can only support one
frag user, see [1].
It seems that users may request different frag sizes depending
on the MTU and packet size. Provide an option to allocate a
non-frag page when a whole page is not able to hold two frags,
so that users have a unified interface for memory allocation
with the least memory utilization and performance penalty.
1. https://lore.kernel.org/netdev/ZEU+vospFdm08IeE@localhost.localdomain/
Signed-off-by: Yunsheng Lin <linyunsheng@...wei.com>
CC: Lorenzo Bianconi <lorenzo@...nel.org>
CC: Alexander Duyck <alexander.duyck@...il.com>
---
net/core/page_pool.c | 47 +++++++++++++++++++++++++++-----------------
1 file changed, 29 insertions(+), 18 deletions(-)
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 0868aa8f6323..e84ec6eabefd 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -699,14 +699,27 @@ struct page *page_pool_alloc_frag(struct page_pool *pool,
unsigned int max_size = PAGE_SIZE << pool->p.order;
struct page *page = pool->frag_page;
- if (WARN_ON(!(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
- size > max_size))
+ if (unlikely(size > max_size))
return NULL;
+ if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT) {
+ *offset = 0;
+ return page_pool_alloc_pages(pool, gfp);
+ }
+
size = ALIGN(size, dma_get_cache_alignment());
- *offset = pool->frag_offset;
- if (page && *offset + size > max_size) {
+ if (page) {
+ *offset = pool->frag_offset;
+
+ if (*offset + size <= max_size) {
+ pool->frag_users++;
+ pool->frag_offset = *offset + size;
+ alloc_stat_inc(pool, fast);
+ return page;
+ }
+
+ pool->frag_page = NULL;
page = page_pool_drain_frag(pool, page);
if (page) {
alloc_stat_inc(pool, fast);
@@ -714,26 +727,24 @@ struct page *page_pool_alloc_frag(struct page_pool *pool,
}
}
- if (!page) {
- page = page_pool_alloc_pages(pool, gfp);
- if (unlikely(!page)) {
- pool->frag_page = NULL;
- return NULL;
- }
-
- pool->frag_page = page;
+ page = page_pool_alloc_pages(pool, gfp);
+ if (unlikely(!page))
+ return NULL;
frag_reset:
- pool->frag_users = 1;
+ /* return page as non-frag page if a page is not able to
+ * hold two frags for the current requested size.
+ */
+ if (unlikely(size << 1 > max_size)) {
*offset = 0;
- pool->frag_offset = size;
- page_pool_fragment_page(page, BIAS_MAX);
return page;
}
- pool->frag_users++;
- pool->frag_offset = *offset + size;
- alloc_stat_inc(pool, fast);
+ pool->frag_page = page;
+ pool->frag_users = 1;
+ *offset = 0;
+ pool->frag_offset = size;
+ page_pool_fragment_page(page, BIAS_MAX);
return page;
}
EXPORT_SYMBOL(page_pool_alloc_frag);
--
2.33.0
Powered by blists - more mailing lists