Message-ID: <20250307115722.705311-6-bigeasy@linutronix.de>
Date: Fri, 7 Mar 2025 12:57:22 +0100
From: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
To: linux-rdma@...r.kernel.org,
netdev@...r.kernel.org
Cc: "David S. Miller" <davem@...emloft.net>,
Andrew Lunn <andrew+netdev@...n.ch>,
Eric Dumazet <edumazet@...gle.com>,
Ilias Apalodimas <ilias.apalodimas@...aro.org>,
Jakub Kicinski <kuba@...nel.org>,
Jesper Dangaard Brouer <hawk@...nel.org>,
Joe Damato <jdamato@...tly.com>,
Leon Romanovsky <leon@...nel.org>,
Paolo Abeni <pabeni@...hat.com>,
Saeed Mahameed <saeedm@...dia.com>,
Simon Horman <horms@...nel.org>,
Tariq Toukan <tariqt@...dia.com>,
Thomas Gleixner <tglx@...utronix.de>,
Yunsheng Lin <linyunsheng@...wei.com>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Subject: [PATCH net-next v2 5/5] page_pool: Convert page_pool_alloc_stats to u64_stats_t.
Using u64 for statistics can lead to inconsistency on 32bit
architectures because both an update and a read require accessing two
separate 32bit values.
This can be avoided by using u64_stats_t for the counters and
u64_stats_sync for the required synchronisation on 32bit platforms. The
synchronisation is a NOP on 64bit architectures.
Use u64_stats_t for the counters in page_pool_alloc_stats.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
---
 include/net/page_pool/types.h | 14 ++++++-----
 net/core/page_pool.c          | 47 +++++++++++++++++++++++++----------
 net/core/page_pool_user.c     | 12 ++++-----
 3 files changed, 48 insertions(+), 25 deletions(-)
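
For reference, the u64_stats update/read pattern the counters are
converted to looks roughly like the sketch below. The struct and
function names are made up for illustration; only the u64_stats_*()
helpers are the real API.

/*
 * Illustrative sketch of the u64_stats pattern, not part of the patch.
 * The syncp must be initialised with u64_stats_init() before first use.
 */
#include <linux/u64_stats_sync.h>

struct example_stats {
	struct u64_stats_sync syncp;
	u64_stats_t packets;
};

/* Writer side, e.g. softirq: a NOP on 64bit, a seqcount on 32bit. */
static void example_stats_inc(struct example_stats *s)
{
	u64_stats_update_begin(&s->syncp);
	u64_stats_inc(&s->packets);
	u64_stats_update_end(&s->syncp);
}

/* Reader side: retry the snapshot if a writer raced with the read. */
static u64 example_stats_snapshot(const struct example_stats *s)
{
	unsigned int start;
	u64 packets;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		packets = u64_stats_read(&s->packets);
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return packets;
}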
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index daf989d01436e..78984b9286c6b 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -96,6 +96,7 @@ struct page_pool_params {
#ifdef CONFIG_PAGE_POOL_STATS
/**
* struct page_pool_alloc_stats - allocation statistics
+ * @syncp: synchronisation point for updates.
* @fast: successful fast path allocations
* @slow: slow path order-0 allocations
* @slow_high_order: slow path high order allocations
@@ -105,12 +106,13 @@ struct page_pool_params {
* the cache due to a NUMA mismatch
*/
struct page_pool_alloc_stats {
- u64 fast;
- u64 slow;
- u64 slow_high_order;
- u64 empty;
- u64 refill;
- u64 waive;
+ struct u64_stats_sync syncp;
+ u64_stats_t fast;
+ u64_stats_t slow;
+ u64_stats_t slow_high_order;
+ u64_stats_t empty;
+ u64_stats_t refill;
+ u64_stats_t waive;
};
/**
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 312bdc5b5a8bf..9f4a390964195 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -42,7 +42,14 @@ static DEFINE_PER_CPU(struct page_pool_recycle_stats, pp_system_recycle_stats) =
};
/* alloc_stat_inc is intended to be used in softirq context */
-#define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++)
+#define alloc_stat_inc(pool, __stat) \
+ do { \
+ struct page_pool_alloc_stats *s = &pool->alloc_stats; \
+ u64_stats_update_begin(&s->syncp); \
+ u64_stats_inc(&s->__stat); \
+ u64_stats_update_end(&s->syncp); \
+ } while (0)
+
/* recycle_stat_inc is safe to use when preemption is possible. */
#define recycle_stat_inc(pool, __stat) \
do { \
@@ -102,19 +109,32 @@ static const char pp_stats_mq[][ETH_GSTRING_LEN] = {
bool page_pool_get_stats(const struct page_pool *pool,
struct page_pool_stats *stats)
{
+ u64 fast, slow, slow_high_order, empty, refill, waive;
+ const struct page_pool_alloc_stats *alloc_stats;
unsigned int start;
int cpu = 0;
if (!stats)
return false;
+ alloc_stats = &pool->alloc_stats;
/* The caller is responsible to initialize stats. */
- stats->alloc_stats.fast += pool->alloc_stats.fast;
- stats->alloc_stats.slow += pool->alloc_stats.slow;
- stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order;
- stats->alloc_stats.empty += pool->alloc_stats.empty;
- stats->alloc_stats.refill += pool->alloc_stats.refill;
- stats->alloc_stats.waive += pool->alloc_stats.waive;
+ do {
+ start = u64_stats_fetch_begin(&alloc_stats->syncp);
+ fast = u64_stats_read(&alloc_stats->fast);
+ slow = u64_stats_read(&alloc_stats->slow);
+ slow_high_order = u64_stats_read(&alloc_stats->slow_high_order);
+ empty = u64_stats_read(&alloc_stats->empty);
+ refill = u64_stats_read(&alloc_stats->refill);
+ waive = u64_stats_read(&alloc_stats->waive);
+ } while (u64_stats_fetch_retry(&alloc_stats->syncp, start));
+
+ u64_stats_add(&stats->alloc_stats.fast, fast);
+ u64_stats_add(&stats->alloc_stats.slow, slow);
+ u64_stats_add(&stats->alloc_stats.slow_high_order, slow_high_order);
+ u64_stats_add(&stats->alloc_stats.empty, empty);
+ u64_stats_add(&stats->alloc_stats.refill, refill);
+ u64_stats_add(&stats->alloc_stats.waive, waive);
for_each_possible_cpu(cpu) {
u64 cached, cache_full, ring, ring_full, released_refcnt;
@@ -173,12 +193,12 @@ u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats)
{
const struct page_pool_stats *pool_stats = stats;
- *data++ = pool_stats->alloc_stats.fast;
- *data++ = pool_stats->alloc_stats.slow;
- *data++ = pool_stats->alloc_stats.slow_high_order;
- *data++ = pool_stats->alloc_stats.empty;
- *data++ = pool_stats->alloc_stats.refill;
- *data++ = pool_stats->alloc_stats.waive;
+ *data++ = u64_stats_read(&pool_stats->alloc_stats.fast);
+ *data++ = u64_stats_read(&pool_stats->alloc_stats.slow);
+ *data++ = u64_stats_read(&pool_stats->alloc_stats.slow_high_order);
+ *data++ = u64_stats_read(&pool_stats->alloc_stats.empty);
+ *data++ = u64_stats_read(&pool_stats->alloc_stats.refill);
+ *data++ = u64_stats_read(&pool_stats->alloc_stats.waive);
*data++ = u64_stats_read(&pool_stats->recycle_stats.cached);
*data++ = u64_stats_read(&pool_stats->recycle_stats.cache_full);
*data++ = u64_stats_read(&pool_stats->recycle_stats.ring);
@@ -303,6 +323,7 @@ static int page_pool_init(struct page_pool *pool,
pool->recycle_stats = &pp_system_recycle_stats;
pool->system = true;
}
+ u64_stats_init(&pool->alloc_stats.syncp);
#endif
if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
diff --git a/net/core/page_pool_user.c b/net/core/page_pool_user.c
index 0d038c0c8996d..c368cb141147f 100644
--- a/net/core/page_pool_user.c
+++ b/net/core/page_pool_user.c
@@ -137,17 +137,17 @@ page_pool_nl_stats_fill(struct sk_buff *rsp, const struct page_pool *pool,
nla_nest_end(rsp, nest);
if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST,
- stats.alloc_stats.fast) ||
+ u64_stats_read(&stats.alloc_stats.fast)) ||
nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW,
- stats.alloc_stats.slow) ||
+ u64_stats_read(&stats.alloc_stats.slow)) ||
nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER,
- stats.alloc_stats.slow_high_order) ||
+ u64_stats_read(&stats.alloc_stats.slow_high_order)) ||
nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY,
- stats.alloc_stats.empty) ||
+ u64_stats_read(&stats.alloc_stats.empty)) ||
nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL,
- stats.alloc_stats.refill) ||
+ u64_stats_read(&stats.alloc_stats.refill)) ||
nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE,
- stats.alloc_stats.waive) ||
+ u64_stats_read(&stats.alloc_stats.waive)) ||
nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED,
u64_stats_read(&stats.recycle_stats.cached)) ||
nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL,
--
2.47.2