Message-ID: <20250221115221.291006-2-bigeasy@linutronix.de>
Date: Fri, 21 Feb 2025 12:52:20 +0100
From: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
To: linux-rdma@...r.kernel.org,
	netdev@...r.kernel.org
Cc: "David S. Miller" <davem@...emloft.net>,
	Andrew Lunn <andrew+netdev@...n.ch>,
	Eric Dumazet <edumazet@...gle.com>,
	Ilias Apalodimas <ilias.apalodimas@...aro.org>,
	Jakub Kicinski <kuba@...nel.org>,
	Jesper Dangaard Brouer <hawk@...nel.org>,
	Leon Romanovsky <leon@...nel.org>,
	Paolo Abeni <pabeni@...hat.com>,
	Saeed Mahameed <saeedm@...dia.com>,
	Simon Horman <horms@...nel.org>,
	Tariq Toukan <tariqt@...dia.com>,
	Thomas Gleixner <tglx@...utronix.de>,
	Yunsheng Lin <linyunsheng@...wei.com>,
	Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Subject: [PATCH net-next 1/2] page_pool: Convert page_pool_recycle_stats to u64_stats_t.

Using u64 for statistics can lead to inconsistent values on 32bit
because an update and a read each require two accesses to 32bit halves,
so a reader can observe a torn value.
This can be avoided by using u64_stats_t for the counters and
u64_stats_sync for the required synchronisation on 32bit platforms. The
synchronisation is a NOP on 64bit architectures.

Use u64_stats_t for the counters in page_pool_recycle_stats.
Add U64_STATS_ZERO, a static initializer for the embedded struct
u64_stats_sync.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
---
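For reference, a minimal, self-contained sketch of the u64_stats_sync
pattern this patch converts page_pool to. The demo_* names are made up
for illustration and are not part of the patch; the writer brackets
each update with update_begin/update_end, and the reader retries its
snapshot until no writer was active. On 64bit all four helpers are
NOPs, as noted in the commit message.

#include <linux/u64_stats_sync.h>

struct demo_stats {
	struct u64_stats_sync syncp;
	u64_stats_t packets;
};

/* Writer: bracket the update so 32bit readers can detect tearing. */
static void demo_inc(struct demo_stats *s)
{
	u64_stats_update_begin(&s->syncp);
	u64_stats_inc(&s->packets);
	u64_stats_update_end(&s->syncp);
}

/* Reader: retry the snapshot if a writer was active meanwhile. */
static u64 demo_read(struct demo_stats *s)
{
	unsigned int start;
	u64 packets;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		packets = u64_stats_read(&s->packets);
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return packets;
}

This assumes the syncp was set up with u64_stats_init() (or, for static
per-CPU instances, the U64_STATS_ZERO initializer added below).
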
 Documentation/networking/page_pool.rst        |  4 +-
 .../ethernet/mellanox/mlx5/core/en_stats.c    | 12 ++---
 include/linux/u64_stats_sync.h                |  5 ++
 include/net/page_pool/types.h                 | 13 +++--
 net/core/page_pool.c                          | 50 +++++++++++++------
 net/core/page_pool_user.c                     | 10 ++--
 6 files changed, 61 insertions(+), 33 deletions(-)

diff --git a/Documentation/networking/page_pool.rst b/Documentation/networking/page_pool.rst
index 9d958128a57cb..00431bc88825f 100644
--- a/Documentation/networking/page_pool.rst
+++ b/Documentation/networking/page_pool.rst
@@ -184,8 +184,8 @@ Stats
 	struct page_pool_stats stats = { 0 };
 	if (page_pool_get_stats(page_pool, &stats)) {
 		/* perhaps the driver reports statistics with ethool */
-		ethtool_print_allocation_stats(&stats.alloc_stats);
-		ethtool_print_recycle_stats(&stats.recycle_stats);
+		ethtool_print_allocation_stats(u64_stats_read(&stats.alloc_stats));
+		ethtool_print_recycle_stats(u64_stats_read(&stats.recycle_stats));
 	}
 	#endif
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 611ec4b6f3709..baff961970f25 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -501,7 +501,7 @@ static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
 {
 	struct mlx5e_rq_stats *rq_stats = c->rq.stats;
 	struct page_pool *pool = c->rq.page_pool;
-	struct page_pool_stats stats = { 0 };
+	struct page_pool_stats stats = { };
 
 	if (!page_pool_get_stats(pool, &stats))
 		return;
@@ -513,11 +513,11 @@ static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
 	rq_stats->pp_alloc_waive = stats.alloc_stats.waive;
 	rq_stats->pp_alloc_refill = stats.alloc_stats.refill;
 
-	rq_stats->pp_recycle_cached = stats.recycle_stats.cached;
-	rq_stats->pp_recycle_cache_full = stats.recycle_stats.cache_full;
-	rq_stats->pp_recycle_ring = stats.recycle_stats.ring;
-	rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full;
-	rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt;
+	rq_stats->pp_recycle_cached = u64_stats_read(&stats.recycle_stats.cached);
+	rq_stats->pp_recycle_cache_full = u64_stats_read(&stats.recycle_stats.cache_full);
+	rq_stats->pp_recycle_ring = u64_stats_read(&stats.recycle_stats.ring);
+	rq_stats->pp_recycle_ring_full = u64_stats_read(&stats.recycle_stats.ring_full);
+	rq_stats->pp_recycle_released_ref = u64_stats_read(&stats.recycle_stats.released_refcnt);
 }
 #else
 static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 457879938fc19..086bd4a51cfe9 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -94,6 +94,8 @@ static inline void u64_stats_inc(u64_stats_t *p)
 	local64_inc(&p->v);
 }
 
+#define U64_STATS_ZERO(_member, _name)	{}
+
 static inline void u64_stats_init(struct u64_stats_sync *syncp) { }
 static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp) { }
 static inline void __u64_stats_update_end(struct u64_stats_sync *syncp) { }
@@ -141,6 +143,9 @@ static inline void u64_stats_inc(u64_stats_t *p)
 		seqcount_init(&__s->seq);		\
 	} while (0)
 
+#define U64_STATS_ZERO(_member, _name)			\
+	_member.seq	= SEQCNT_ZERO(#_name#_member.seq)
+
 static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp)
 {
 	preempt_disable_nested();
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index 7f405672b089d..c5ad80a542b7d 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -6,6 +6,7 @@
 #include <linux/dma-direction.h>
 #include <linux/ptr_ring.h>
 #include <linux/types.h>
+#include <linux/u64_stats_sync.h>
 #include <net/netmem.h>
 
 #define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
@@ -114,6 +115,7 @@ struct page_pool_alloc_stats {
 
 /**
  * struct page_pool_recycle_stats - recycling (freeing) statistics
+ * @syncp:	synchronisation point for updates.
  * @cached:	recycling placed page in the page pool cache
  * @cache_full:	page pool cache was full
  * @ring:	page placed into the ptr ring
@@ -121,11 +123,12 @@ struct page_pool_alloc_stats {
  * @released_refcnt:	page released (and not recycled) because refcnt > 1
  */
 struct page_pool_recycle_stats {
-	u64 cached;
-	u64 cache_full;
-	u64 ring;
-	u64 ring_full;
-	u64 released_refcnt;
+	struct u64_stats_sync syncp;
+	u64_stats_t cached;
+	u64_stats_t cache_full;
+	u64_stats_t ring;
+	u64_stats_t ring_full;
+	u64_stats_t released_refcnt;
 };
 
 /**
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index f5e908c9e7ad8..36fa14a1e8441 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -37,21 +37,27 @@ DEFINE_STATIC_KEY_FALSE(page_pool_mem_providers);
 #define BIAS_MAX	(LONG_MAX >> 1)
 
 #ifdef CONFIG_PAGE_POOL_STATS
-static DEFINE_PER_CPU(struct page_pool_recycle_stats, pp_system_recycle_stats);
+static DEFINE_PER_CPU(struct page_pool_recycle_stats, pp_system_recycle_stats) = {
+	U64_STATS_ZERO(.syncp, pp_system_recycle_stats),
+};
 
 /* alloc_stat_inc is intended to be used in softirq context */
 #define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
 /* recycle_stat_inc is safe to use when preemption is possible. */
 #define recycle_stat_inc(pool, __stat)							\
 	do {										\
-		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats;	\
-		this_cpu_inc(s->__stat);						\
+		struct page_pool_recycle_stats *s = this_cpu_ptr(pool->recycle_stats);	\
+		u64_stats_update_begin(&s->syncp);					\
+		u64_stats_inc(&s->__stat);						\
+		u64_stats_update_end(&s->syncp);					\
 	} while (0)
 
 #define recycle_stat_add(pool, __stat, val)						\
 	do {										\
-		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats;	\
-		this_cpu_add(s->__stat, val);						\
+		struct page_pool_recycle_stats *s = this_cpu_ptr(pool->recycle_stats);	\
+		u64_stats_update_begin(&s->syncp);					\
+		u64_stats_add(&s->__stat, val);						\
+		u64_stats_update_end(&s->syncp);					\
 	} while (0)
 
 static const char pp_stats[][ETH_GSTRING_LEN] = {
@@ -82,6 +88,7 @@ static const char pp_stats[][ETH_GSTRING_LEN] = {
 bool page_pool_get_stats(const struct page_pool *pool,
 			 struct page_pool_stats *stats)
 {
+	unsigned int start;
 	int cpu = 0;
 
 	if (!stats)
@@ -99,11 +106,19 @@ bool page_pool_get_stats(const struct page_pool *pool,
 		const struct page_pool_recycle_stats *pcpu =
 			per_cpu_ptr(pool->recycle_stats, cpu);
 
-		stats->recycle_stats.cached += pcpu->cached;
-		stats->recycle_stats.cache_full += pcpu->cache_full;
-		stats->recycle_stats.ring += pcpu->ring;
-		stats->recycle_stats.ring_full += pcpu->ring_full;
-		stats->recycle_stats.released_refcnt += pcpu->released_refcnt;
+		do {
+			start = u64_stats_fetch_begin(&pcpu->syncp);
+			u64_stats_add(&stats->recycle_stats.cached,
+				      u64_stats_read(&pcpu->cached));
+			u64_stats_add(&stats->recycle_stats.cache_full,
+				      u64_stats_read(&pcpu->cache_full));
+			u64_stats_add(&stats->recycle_stats.ring,
+				      u64_stats_read(&pcpu->ring));
+			u64_stats_add(&stats->recycle_stats.ring_full,
+				      u64_stats_read(&pcpu->ring_full));
+			u64_stats_add(&stats->recycle_stats.released_refcnt,
+				      u64_stats_read(&pcpu->released_refcnt));
+		} while (u64_stats_fetch_retry(&pcpu->syncp, start));
 	}
 
 	return true;
@@ -139,11 +154,11 @@ u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats)
 	*data++ = pool_stats->alloc_stats.empty;
 	*data++ = pool_stats->alloc_stats.refill;
 	*data++ = pool_stats->alloc_stats.waive;
-	*data++ = pool_stats->recycle_stats.cached;
-	*data++ = pool_stats->recycle_stats.cache_full;
-	*data++ = pool_stats->recycle_stats.ring;
-	*data++ = pool_stats->recycle_stats.ring_full;
-	*data++ = pool_stats->recycle_stats.released_refcnt;
+	*data++ = u64_stats_read(&pool_stats->recycle_stats.cached);
+	*data++ = u64_stats_read(&pool_stats->recycle_stats.cache_full);
+	*data++ = u64_stats_read(&pool_stats->recycle_stats.ring);
+	*data++ = u64_stats_read(&pool_stats->recycle_stats.ring_full);
+	*data++ = u64_stats_read(&pool_stats->recycle_stats.released_refcnt);
 
 	return data;
 }
@@ -247,9 +262,14 @@ static int page_pool_init(struct page_pool *pool,
 
 #ifdef CONFIG_PAGE_POOL_STATS
 	if (!(pool->slow.flags & PP_FLAG_SYSTEM_POOL)) {
+		unsigned int cpu;
+
 		pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
 		if (!pool->recycle_stats)
 			return -ENOMEM;
+
+		for_each_possible_cpu(cpu)
+			u64_stats_init(&per_cpu_ptr(pool->recycle_stats, cpu)->syncp);
 	} else {
 		/* For system page pool instance we use a singular stats object
 		 * instead of allocating a separate percpu variable for each
diff --git a/net/core/page_pool_user.c b/net/core/page_pool_user.c
index 6677e0c2e2565..0d038c0c8996d 100644
--- a/net/core/page_pool_user.c
+++ b/net/core/page_pool_user.c
@@ -149,15 +149,15 @@ page_pool_nl_stats_fill(struct sk_buff *rsp, const struct page_pool *pool,
 	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE,
 			 stats.alloc_stats.waive) ||
 	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED,
-			 stats.recycle_stats.cached) ||
+			 u64_stats_read(&stats.recycle_stats.cached)) ||
 	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL,
-			 stats.recycle_stats.cache_full) ||
+			 u64_stats_read(&stats.recycle_stats.cache_full)) ||
 	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING,
-			 stats.recycle_stats.ring) ||
+			 u64_stats_read(&stats.recycle_stats.ring)) ||
 	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL,
-			 stats.recycle_stats.ring_full) ||
+			 u64_stats_read(&stats.recycle_stats.ring_full)) ||
 	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT,
-			 stats.recycle_stats.released_refcnt))
+			 u64_stats_read(&stats.recycle_stats.released_refcnt)))
 		goto err_cancel_msg;
 
 	genlmsg_end(rsp, hdr);
-- 
2.47.2

