Message-ID: <20250221115221.291006-3-bigeasy@linutronix.de>
Date: Fri, 21 Feb 2025 12:52:21 +0100
From: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
To: linux-rdma@...r.kernel.org,
	netdev@...r.kernel.org
Cc: "David S. Miller" <davem@...emloft.net>,
	Andrew Lunn <andrew+netdev@...n.ch>,
	Eric Dumazet <edumazet@...gle.com>,
	Ilias Apalodimas <ilias.apalodimas@...aro.org>,
	Jakub Kicinski <kuba@...nel.org>,
	Jesper Dangaard Brouer <hawk@...nel.org>,
	Leon Romanovsky <leon@...nel.org>,
	Paolo Abeni <pabeni@...hat.com>,
	Saeed Mahameed <saeedm@...dia.com>,
	Simon Horman <horms@...nel.org>,
	Tariq Toukan <tariqt@...dia.com>,
	Thomas Gleixner <tglx@...utronix.de>,
	Yunsheng Lin <linyunsheng@...wei.com>,
	Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Subject: [PATCH net-next 2/2] page_pool: Convert page_pool_alloc_stats to u64_stats_t.

Using u64 for statistics can lead to inconsistent values on 32bit
architectures because an update and a read each require accessing two
32bit values.
This can be avoided by using u64_stats_t for the counters and
u64_stats_sync for the required synchronisation on 32bit platforms. The
synchronisation is a NOP on 64bit architectures.

Use u64_stats_t for the counters in page_pool_alloc_stats.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
---
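For reference, the u64_stats writer/reader pattern this conversion
follows looks roughly like the sketch below (hypothetical "foo" names
for illustration only, not the actual page_pool code):

	#include <linux/u64_stats_sync.h>

	struct foo_stats {
		struct u64_stats_sync syncp;
		u64_stats_t packets;
	};

	static void foo_stats_setup(struct foo_stats *s)
	{
		u64_stats_init(&s->syncp);
	}

	/* Writer side: the begin/end pair bumps a seqcount on 32bit so
	 * readers can detect a concurrent update; on 64bit it is a NOP.
	 */
	static void foo_stats_inc(struct foo_stats *s)
	{
		u64_stats_update_begin(&s->syncp);
		u64_stats_inc(&s->packets);
		u64_stats_update_end(&s->syncp);
	}

	/* Reader side: retry the snapshot if a writer was active. */
	static u64 foo_stats_get(const struct foo_stats *s)
	{
		unsigned int start;
		u64 packets;

		do {
			start = u64_stats_fetch_begin(&s->syncp);
			packets = u64_stats_read(&s->packets);
		} while (u64_stats_fetch_retry(&s->syncp, start));

		return packets;
	}
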
 .../ethernet/mellanox/mlx5/core/en_stats.c    | 12 ++---
 include/net/page_pool/types.h                 | 14 +++---
 net/core/page_pool.c                          | 45 +++++++++++++------
 net/core/page_pool_user.c                     | 12 ++---
 4 files changed, 52 insertions(+), 31 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index baff961970f25..afb5c135b68c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -506,12 +506,12 @@ static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
 	if (!page_pool_get_stats(pool, &stats))
 		return;
 
-	rq_stats->pp_alloc_fast = stats.alloc_stats.fast;
-	rq_stats->pp_alloc_slow = stats.alloc_stats.slow;
-	rq_stats->pp_alloc_slow_high_order = stats.alloc_stats.slow_high_order;
-	rq_stats->pp_alloc_empty = stats.alloc_stats.empty;
-	rq_stats->pp_alloc_waive = stats.alloc_stats.waive;
-	rq_stats->pp_alloc_refill = stats.alloc_stats.refill;
+	rq_stats->pp_alloc_fast = u64_stats_read(&stats.alloc_stats.fast);
+	rq_stats->pp_alloc_slow = u64_stats_read(&stats.alloc_stats.slow);
+	rq_stats->pp_alloc_slow_high_order = u64_stats_read(&stats.alloc_stats.slow_high_order);
+	rq_stats->pp_alloc_empty = u64_stats_read(&stats.alloc_stats.empty);
+	rq_stats->pp_alloc_waive = u64_stats_read(&stats.alloc_stats.waive);
+	rq_stats->pp_alloc_refill = u64_stats_read(&stats.alloc_stats.refill);
 
 	rq_stats->pp_recycle_cached = u64_stats_read(&stats.recycle_stats.cached);
 	rq_stats->pp_recycle_cache_full = u64_stats_read(&stats.recycle_stats.cache_full);
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index c5ad80a542b7d..f45d55e6e8643 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -96,6 +96,7 @@ struct page_pool_params {
 #ifdef CONFIG_PAGE_POOL_STATS
 /**
  * struct page_pool_alloc_stats - allocation statistics
+ * @syncp:	synchronisation point for updates.
  * @fast:	successful fast path allocations
  * @slow:	slow path order-0 allocations
  * @slow_high_order: slow path high order allocations
@@ -105,12 +106,13 @@ struct page_pool_params {
  *		the cache due to a NUMA mismatch
  */
 struct page_pool_alloc_stats {
-	u64 fast;
-	u64 slow;
-	u64 slow_high_order;
-	u64 empty;
-	u64 refill;
-	u64 waive;
+	struct u64_stats_sync syncp;
+	u64_stats_t fast;
+	u64_stats_t slow;
+	u64_stats_t slow_high_order;
+	u64_stats_t empty;
+	u64_stats_t refill;
+	u64_stats_t waive;
 };
 
 /**
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 36fa14a1e8441..d69a03609613b 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -42,7 +42,14 @@ static DEFINE_PER_CPU(struct page_pool_recycle_stats, pp_system_recycle_stats) =
 };
 
 /* alloc_stat_inc is intended to be used in softirq context */
-#define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
+#define alloc_stat_inc(pool, __stat)						\
+	do {									\
+		struct page_pool_alloc_stats *s = &pool->alloc_stats;		\
+		u64_stats_update_begin(&s->syncp);				\
+		u64_stats_inc(&s->__stat);					\
+		u64_stats_update_end(&s->syncp);				\
+	} while (0)
+
 /* recycle_stat_inc is safe to use when preemption is possible. */
 #define recycle_stat_inc(pool, __stat)							\
 	do {										\
@@ -88,19 +95,30 @@ static const char pp_stats[][ETH_GSTRING_LEN] = {
 bool page_pool_get_stats(const struct page_pool *pool,
 			 struct page_pool_stats *stats)
 {
+	const struct page_pool_alloc_stats *alloc_stats;
 	unsigned int start;
 	int cpu = 0;
 
 	if (!stats)
 		return false;
 
+	alloc_stats = &pool->alloc_stats;
 	/* The caller is responsible to initialize stats. */
-	stats->alloc_stats.fast += pool->alloc_stats.fast;
-	stats->alloc_stats.slow += pool->alloc_stats.slow;
-	stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order;
-	stats->alloc_stats.empty += pool->alloc_stats.empty;
-	stats->alloc_stats.refill += pool->alloc_stats.refill;
-	stats->alloc_stats.waive += pool->alloc_stats.waive;
+	do {
+		start = u64_stats_fetch_begin(&alloc_stats->syncp);
+		u64_stats_add(&stats->alloc_stats.fast,
+			      u64_stats_read(&alloc_stats->fast));
+		u64_stats_add(&stats->alloc_stats.slow,
+			      u64_stats_read(&alloc_stats->slow));
+		u64_stats_add(&stats->alloc_stats.slow_high_order,
+			      u64_stats_read(&alloc_stats->slow_high_order));
+		u64_stats_add(&stats->alloc_stats.empty,
+			      u64_stats_read(&alloc_stats->empty));
+		u64_stats_add(&stats->alloc_stats.refill,
+			      u64_stats_read(&alloc_stats->refill));
+		u64_stats_add(&stats->alloc_stats.waive,
+			      u64_stats_read(&alloc_stats->waive));
+	} while (u64_stats_fetch_retry(&alloc_stats->syncp, start));
 
 	for_each_possible_cpu(cpu) {
 		const struct page_pool_recycle_stats *pcpu =
@@ -148,12 +166,12 @@ u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats)
 {
 	const struct page_pool_stats *pool_stats = stats;
 
-	*data++ = pool_stats->alloc_stats.fast;
-	*data++ = pool_stats->alloc_stats.slow;
-	*data++ = pool_stats->alloc_stats.slow_high_order;
-	*data++ = pool_stats->alloc_stats.empty;
-	*data++ = pool_stats->alloc_stats.refill;
-	*data++ = pool_stats->alloc_stats.waive;
+	*data++ = u64_stats_read(&pool_stats->alloc_stats.fast);
+	*data++ = u64_stats_read(&pool_stats->alloc_stats.slow);
+	*data++ = u64_stats_read(&pool_stats->alloc_stats.slow_high_order);
+	*data++ = u64_stats_read(&pool_stats->alloc_stats.empty);
+	*data++ = u64_stats_read(&pool_stats->alloc_stats.refill);
+	*data++ = u64_stats_read(&pool_stats->alloc_stats.waive);
 	*data++ = u64_stats_read(&pool_stats->recycle_stats.cached);
 	*data++ = u64_stats_read(&pool_stats->recycle_stats.cache_full);
 	*data++ = u64_stats_read(&pool_stats->recycle_stats.ring);
@@ -278,6 +296,7 @@ static int page_pool_init(struct page_pool *pool,
 		pool->recycle_stats = &pp_system_recycle_stats;
 		pool->system = true;
 	}
+	u64_stats_init(&pool->alloc_stats.syncp);
 #endif
 
 	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
diff --git a/net/core/page_pool_user.c b/net/core/page_pool_user.c
index 0d038c0c8996d..c368cb141147f 100644
--- a/net/core/page_pool_user.c
+++ b/net/core/page_pool_user.c
@@ -137,17 +137,17 @@ page_pool_nl_stats_fill(struct sk_buff *rsp, const struct page_pool *pool,
 	nla_nest_end(rsp, nest);
 
 	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST,
-			 stats.alloc_stats.fast) ||
+			 u64_stats_read(&stats.alloc_stats.fast)) ||
 	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW,
-			 stats.alloc_stats.slow) ||
+			 u64_stats_read(&stats.alloc_stats.slow)) ||
 	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER,
-			 stats.alloc_stats.slow_high_order) ||
+			 u64_stats_read(&stats.alloc_stats.slow_high_order)) ||
 	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY,
-			 stats.alloc_stats.empty) ||
+			 u64_stats_read(&stats.alloc_stats.empty)) ||
 	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL,
-			 stats.alloc_stats.refill) ||
+			 u64_stats_read(&stats.alloc_stats.refill)) ||
 	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE,
-			 stats.alloc_stats.waive) ||
+			 u64_stats_read(&stats.alloc_stats.waive)) ||
 	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED,
 			 u64_stats_read(&stats.recycle_stats.cached)) ||
 	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL,
-- 
2.47.2

