[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <168244294384.1741095.6037010854411310099.stgit@firesoul>
Date: Tue, 25 Apr 2023 19:15:43 +0200
From: Jesper Dangaard Brouer <brouer@...hat.com>
To: Ilias Apalodimas <ilias.apalodimas@...aro.org>,
netdev@...r.kernel.org, Eric Dumazet <eric.dumazet@...il.com>,
linux-mm@...ck.org, Mel Gorman <mgorman@...hsingularity.net>
Cc: Jesper Dangaard Brouer <brouer@...hat.com>, lorenzo@...nel.org,
Toke Høiland-Jørgensen <toke@...hat.com>,
linyunsheng@...wei.com, bpf@...r.kernel.org,
"David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>, willy@...radead.org
Subject: [PATCH RFC net-next/mm V1 2/3] page_pool: Use static_key for shutdown
phase
Performance is very important for page pool (PP). This adds the use of
the static_key APIs to regain a single instruction, which gives the
new PP shutdown scheme zero performance impact.
We are uncertain whether this is 100% correct, because the static_key
APIs take a mutex lock, and it is unclear whether all contexts that can
return pages support this. We could spawn a workqueue (like the one we
just removed) to work around this issue.
Seeking input on whether this is worth the complexity.
Signed-off-by: Jesper Dangaard Brouer <brouer@...hat.com>
---
net/core/page_pool.c | 20 ++++++++++++++++----
1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index ce7e8dda6403..3821d8874b15 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -25,6 +25,8 @@
#define BIAS_MAX LONG_MAX
+DEFINE_STATIC_KEY_FALSE(pp_shutdown_phase);
+
#ifdef CONFIG_PAGE_POOL_STATS
/* alloc_stat_inc is intended to be used in softirq context */
#define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++)
@@ -378,7 +380,8 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
int i, nr_pages;
/* API usage BUG: PP in shutdown phase, cannot alloc new pages */
- if (WARN_ON(pool->p.flags & PP_FLAG_SHUTDOWN))
+ if (static_key_enabled(&pp_shutdown_phase) &&
+ WARN_ON(pool->p.flags & PP_FLAG_SHUTDOWN))
return NULL;
/* Don't support bulk alloc for high-order pages */
@@ -609,7 +612,7 @@ void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
recycle_stat_inc(pool, ring_full);
page_pool_return_page(pool, page);
}
- if (pool->p.flags & PP_FLAG_SHUTDOWN)
+ if (static_branch_unlikely(&pp_shutdown_phase))
page_pool_shutdown_attempt(pool);
}
EXPORT_SYMBOL(page_pool_put_defragged_page);
@@ -659,7 +662,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
page_pool_return_page(pool, data[i]);
out:
- if (pool->p.flags & PP_FLAG_SHUTDOWN)
+ if (static_branch_unlikely(&pp_shutdown_phase))
page_pool_shutdown_attempt(pool);
}
EXPORT_SYMBOL(page_pool_put_page_bulk);
@@ -817,7 +820,15 @@ static int page_pool_release(struct page_pool *pool)
noinline
static void page_pool_shutdown_attempt(struct page_pool *pool)
{
- page_pool_release(pool);
+ int inflight;
+
+ if (!(pool->p.flags & PP_FLAG_SHUTDOWN))
+ return;
+
+ inflight = page_pool_release(pool);
+
+ if (static_key_enabled(&pp_shutdown_phase) && !inflight)
+ static_branch_dec(&pp_shutdown_phase);
}
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
@@ -861,6 +872,7 @@ void page_pool_destroy(struct page_pool *pool)
* Enter into shutdown phase, and retry release to handle races.
*/
pool->p.flags |= PP_FLAG_SHUTDOWN;
+ static_branch_inc(&pp_shutdown_phase);
page_pool_shutdown_attempt(pool);
}
EXPORT_SYMBOL(page_pool_destroy);
Powered by blists - more mailing lists