Message-ID: <20191016225028.2100206-7-jonathan.lemon@gmail.com>
Date: Wed, 16 Oct 2019 15:50:24 -0700
From: Jonathan Lemon <jonathan.lemon@...il.com>
To: <brouer@...hat.com>, <ilias.apalodimas@...aro.org>,
<saeedm@...lanox.com>, <tariqt@...lanox.com>
CC: <netdev@...r.kernel.org>, <kernel-team@...com>
Subject: [PATCH 06/10 net-next] page_pool: Add page_pool_keep_page

When releasing a page to the pool, only retain the page if
page_pool_keep_page() returns true.

Do not flush the page pool when its NUMA node changes; instead,
lazily discard pages from the old node as they are returned.

Signed-off-by: Jonathan Lemon <jonathan.lemon@...il.com>
---
 include/net/page_pool.h |  2 --
 net/core/page_pool.c    | 39 +++++++++++++++++++++++----------------
 2 files changed, 23 insertions(+), 18 deletions(-)
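
For reference (not part of the applied diff), the release path after
this change reduces to roughly the sketch below.  keep_page(),
recycle_into_cache(), recycle_into_ring() and return_page() are
shorthand for the real page_pool_keep_page(),
__page_pool_recycle_into_cache(), __page_pool_recycle_into_ring() and
__page_pool_return_page(); the in_serving_softirq() check and DMA
unmap details are elided:

        /* Simplified pseudo-implementation of __page_pool_put_page()
         * after this patch.  keep_page() mirrors page_pool_keep_page():
         * the pool keeps a page only if it holds the sole reference,
         * the page sits on the pool's current NUMA node, and it is not
         * a pfmemalloc page.
         */
        static void put_page_sketch(struct page_pool *pool,
                                    struct page *page, bool allow_direct)
        {
                if (keep_page(pool, page)) {
                        /* fast path: per-CPU cache, then the ptr_ring */
                        if (allow_direct && recycle_into_cache(page, pool))
                                return;
                        if (recycle_into_ring(pool, page))
                                return;
                        /* ring full: fall through and return the page */
                }
                /* wrong node, shared refcnt, pfmemalloc page, or full
                 * ring: unmap the page and give it back to the system.
                 */
                return_page(pool, page);
        }

A page allocated before a node change fails the page_to_nid() test on
its next return and is simply freed, which is what makes the eager
flush in page_pool_update_nid() unnecessary.
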
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index fb13cf6055ff..89bc91294b53 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -227,12 +227,10 @@ static inline bool page_pool_put(struct page_pool *pool)
}
/* Only safe from napi context or when user guarantees it is thread safe */
-void __page_pool_flush(struct page_pool *pool);
static inline void page_pool_update_nid(struct page_pool *pool, int new_nid)
{
if (unlikely(pool->p.nid != new_nid)) {
/* TODO: Add statistics/trace */
- __page_pool_flush(pool);
pool->p.nid = new_nid;
}
}
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 678cf85f273a..ea56823236c5 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -258,6 +258,7 @@ static bool __page_pool_recycle_into_ring(struct page_pool *pool,
struct page *page)
{
int ret;
+
/* BH protection not needed if current is serving softirq */
if (in_serving_softirq())
ret = ptr_ring_produce(&pool->ring, page);
@@ -272,8 +273,8 @@ static bool __page_pool_recycle_into_ring(struct page_pool *pool,
*
* Caller must provide appropriate safe context.
*/
-static bool __page_pool_recycle_direct(struct page *page,
- struct page_pool *pool)
+static bool __page_pool_recycle_into_cache(struct page *page,
+ struct page_pool *pool)
{
if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
return false;
@@ -283,27 +284,35 @@ static bool __page_pool_recycle_direct(struct page *page,
return true;
}
+/* Determine whether this page should be kept or returned
+ *
+ * refcnt == 1 means page_pool owns page.
+ */
+static bool page_pool_keep_page(struct page_pool *pool, struct page *page)
+{
+ return page_ref_count(page) == 1 &&
+ page_to_nid(page) == pool->p.nid &&
+ !page_is_pfmemalloc(page);
+}
+
void __page_pool_put_page(struct page_pool *pool,
struct page *page, bool allow_direct)
{
/* This allocator is optimized for the XDP mode that uses
* one-frame-per-page, but have fallbacks that act like the
* regular page allocator APIs.
- *
- * refcnt == 1 means page_pool owns page, and can recycle it.
*/
- if (likely(page_ref_count(page) == 1)) {
+ if (likely(page_pool_keep_page(pool, page))) {
/* Read barrier done in page_ref_count / READ_ONCE */
if (allow_direct && in_serving_softirq())
- if (__page_pool_recycle_direct(page, pool))
+ if (__page_pool_recycle_into_cache(page, pool))
return;
- if (!__page_pool_recycle_into_ring(pool, page)) {
- /* Cache full, fallback to free pages */
- __page_pool_return_page(pool, page);
- }
- return;
+ if (__page_pool_recycle_into_ring(pool, page))
+ return;
+
+ /* Cache or ring full, fall back to returning the page */
}
/* Fallback/non-XDP mode: API user have elevated refcnt.
*
@@ -318,8 +327,7 @@ void __page_pool_put_page(struct page_pool *pool,
* doing refcnt based recycle tricks, meaning another process
* will be invoking put_page.
*/
- __page_pool_clean_page(pool, page);
- put_page(page);
+ __page_pool_return_page(pool, page);
}
EXPORT_SYMBOL(__page_pool_put_page);
@@ -373,7 +381,7 @@ void __page_pool_free(struct page_pool *pool)
}
EXPORT_SYMBOL(__page_pool_free);
-void __page_pool_flush(struct page_pool *pool)
+static void page_pool_flush(struct page_pool *pool)
{
struct page *page;
@@ -391,14 +399,13 @@ void __page_pool_flush(struct page_pool *pool)
*/
__page_pool_empty_ring(pool);
}
-EXPORT_SYMBOL(__page_pool_flush);
/* Request to shutdown: release pages cached by page_pool, and check
* for in-flight pages
*/
bool __page_pool_request_shutdown(struct page_pool *pool)
{
- __page_pool_flush(pool);
+ page_pool_flush(pool);
return __page_pool_safe_to_destroy(pool);
}
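
For illustration only (nothing below is in this patch): since
page_pool_update_nid() is now just a store, a driver could refresh the
pool's node from its NAPI poll on every invocation.  The drv_* names
are hypothetical; page_pool_update_nid() and numa_mem_id() are real
kernel interfaces:

        /* Hypothetical driver poll loop.  With this patch,
         * page_pool_update_nid() only records the new node id, so it
         * is cheap enough to call unconditionally.  Cached pages from
         * the old node are not flushed; page_pool_keep_page() rejects
         * them one at a time as they come back to the pool.
         */
        static int drv_napi_poll(struct napi_struct *napi, int budget)
        {
                struct drv_rx_ring *ring =
                        container_of(napi, struct drv_rx_ring, napi);

                /* numa_mem_id(): nearest memory node of this CPU */
                page_pool_update_nid(ring->page_pool, numa_mem_id());

                return drv_process_rx(ring, budget);
        }

The pool thus converges to the new node as pages cycle through it,
with no flush and no pause in packet processing.
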
--
2.17.1