[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20230516124801.2465-3-linyunsheng@huawei.com>
Date: Tue, 16 May 2023 20:48:00 +0800
From: Yunsheng Lin <linyunsheng@...wei.com>
To: <davem@...emloft.net>, <kuba@...nel.org>, <pabeni@...hat.com>
CC: <netdev@...r.kernel.org>, <linux-kernel@...r.kernel.org>, Yunsheng Lin
<linyunsheng@...wei.com>, Lorenzo Bianconi <lorenzo@...nel.org>, Alexander
Duyck <alexander.duyck@...il.com>, Jesper Dangaard Brouer <hawk@...nel.org>,
Ilias Apalodimas <ilias.apalodimas@...aro.org>, Eric Dumazet
<edumazet@...gle.com>
Subject: [RFC 2/3] page_pool: support non-frag page for page_pool_alloc_frag()
There is a performance penalty when using page frag support if
the user requests a larger frag size and a page only supports one
frag user, see [1].
Users may request different frag sizes depending on the MTU and
packet size. Provide an option to allocate a non-frag page when
the user requests a frag size larger than a specified threshold,
so that users have a unified interface for memory allocation
with the least memory utilization and performance penalty.
1. https://lore.kernel.org/netdev/ZEU+vospFdm08IeE@localhost.localdomain/
Signed-off-by: Yunsheng Lin <linyunsheng@...wei.com>
CC: Lorenzo Bianconi <lorenzo@...nel.org>
CC: Alexander Duyck <alexander.duyck@...il.com>
---
include/net/page_pool.h | 9 +++++++++
net/core/page_pool.c | 10 ++++++++--
2 files changed, 17 insertions(+), 2 deletions(-)
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 14ac46297ae4..d1c57c0c8f49 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -163,6 +163,7 @@ struct page_pool {
unsigned int frag_offset;
struct page *frag_page;
long frag_users;
+ unsigned int max_frag_size;
#ifdef CONFIG_PAGE_POOL_STATS
/* these stats are incremented while in softirq context */
@@ -213,6 +214,14 @@ struct page_pool {
u64 destroy_cnt;
};
+/* Called after page_pool_create() */
+static inline void page_pool_set_max_frag_size(struct page_pool *pool,
+ unsigned int max_frag_size)
+{
+ pool->max_frag_size = min_t(unsigned int, max_frag_size,
+ PAGE_SIZE << pool->p.order);
+}
+
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 5d93c5dc0549..aab6147f28af 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -198,6 +198,8 @@ static int page_pool_init(struct page_pool *pool,
if (pool->p.flags & PP_FLAG_DMA_MAP)
get_device(pool->p.dev);
+ page_pool_set_max_frag_size(pool, PAGE_SIZE << pool->p.order);
+
return 0;
}
@@ -699,10 +701,14 @@ struct page *page_pool_alloc_frag(struct page_pool *pool,
unsigned int max_size = PAGE_SIZE << pool->p.order;
struct page *page = pool->frag_page;
- if (WARN_ON(PAGE_POOL_DMA_USE_PP_FRAG_COUNT ||
- size > max_size))
+ if (WARN_ON(PAGE_POOL_DMA_USE_PP_FRAG_COUNT))
return NULL;
+ if (unlikely(size > pool->max_frag_size)) {
+ *offset = 0;
+ return page_pool_alloc_pages(pool, gfp);
+ }
+
size = ALIGN(size, dma_get_cache_alignment());
*offset = pool->frag_offset;
--
2.33.0
Powered by blists - more mailing lists