Message-ID: <9f0a571c1f322ff6c4e6facfd7d6d508e73a8f2f.1706451150.git.lorenzo@kernel.org>
Date: Sun, 28 Jan 2024 15:20:40 +0100
From: Lorenzo Bianconi <lorenzo@...nel.org>
To: netdev@...r.kernel.org
Cc: lorenzo.bianconi@...hat.com,
	davem@...emloft.net,
	kuba@...nel.org,
	edumazet@...gle.com,
	pabeni@...hat.com,
	bpf@...r.kernel.org,
	toke@...hat.com,
	willemdebruijn.kernel@...il.com,
	jasowang@...hat.com,
	sdf@...gle.com,
	hawk@...nel.org,
	ilias.apalodimas@...aro.org
Subject: [PATCH v6 net-next 4/5] net: page_pool: make stats available just for global pools

Move the page_pool stats allocation into the page_pool_create() routine
and get rid of it for percpu page_pools.

Signed-off-by: Lorenzo Bianconi <lorenzo@...nel.org>
---
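Note for reviewers (not part of the commit): a minimal driver-side sketch of
the resulting behaviour. It assumes CONFIG_PAGE_POOL_STATS=y; the function
name and the parameter values below are made up for illustration only.
A pool from page_pool_create() carries percpu recycle stats, while one from
page_pool_create_percpu() does not, so page_pool_get_stats() returns false
for it.

/* Illustration only -- hypothetical example, not part of this patch. */
#include <linux/dma-direction.h>
#include <linux/numa.h>
#include <linux/printk.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/types.h>

static void example_pool_stats(struct device *dev)
{
	struct page_pool_params pp_params = {
		.order		= 0,
		.pool_size	= 256,
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
	};
	struct page_pool_stats stats = {};
	struct page_pool *global, *percpu;

	/* Global pool: recycle_stats are allocated in page_pool_create(). */
	global = page_pool_create(&pp_params);
	if (!IS_ERR(global))
		pr_info("global pool stats available: %d\n",
			page_pool_get_stats(global, &stats));

	/* Percpu pool: no recycle_stats, page_pool_get_stats() returns false. */
	percpu = page_pool_create_percpu(&pp_params, 0);
	if (!IS_ERR(percpu))
		pr_info("percpu pool stats available: %d\n",
			page_pool_get_stats(percpu, &stats));

	if (!IS_ERR(global))
		page_pool_destroy(global);
	if (!IS_ERR(percpu))
		page_pool_destroy(percpu);
}
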
 net/core/page_pool.c | 38 +++++++++++++++++++++++---------------
 1 file changed, 23 insertions(+), 15 deletions(-)

diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 89c835fcf094..5278ffef6442 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -37,13 +37,15 @@
 #define recycle_stat_inc(pool, __stat)							\
 	do {										\
 		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats;	\
-		this_cpu_inc(s->__stat);						\
+		if (s)									\
+			this_cpu_inc(s->__stat);					\
 	} while (0)
 
 #define recycle_stat_add(pool, __stat, val)						\
 	do {										\
 		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats;	\
-		this_cpu_add(s->__stat, val);						\
+		if (s)									\
+			this_cpu_add(s->__stat, val);					\
 	} while (0)
 
 static const char pp_stats[][ETH_GSTRING_LEN] = {
@@ -79,6 +81,9 @@ bool page_pool_get_stats(const struct page_pool *pool,
 	if (!stats)
 		return false;
 
+	if (!pool->recycle_stats)
+		return false;
+
 	/* The caller is responsible to initialize stats. */
 	stats->alloc_stats.fast += pool->alloc_stats.fast;
 	stats->alloc_stats.slow += pool->alloc_stats.slow;
@@ -218,19 +223,8 @@ static int page_pool_init(struct page_pool *pool,
 	}
 
 	pool->has_init_callback = !!pool->slow.init_callback;
-
-#ifdef CONFIG_PAGE_POOL_STATS
-	pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
-	if (!pool->recycle_stats)
+	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
 		return -ENOMEM;
-#endif
-
-	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
-#ifdef CONFIG_PAGE_POOL_STATS
-		free_percpu(pool->recycle_stats);
-#endif
-		return -ENOMEM;
-	}
 
 	atomic_set(&pool->pages_state_release_cnt, 0);
 
@@ -295,7 +289,21 @@ EXPORT_SYMBOL(page_pool_create_percpu);
  */
 struct page_pool *page_pool_create(const struct page_pool_params *params)
 {
-	return page_pool_create_percpu(params, -1);
+	struct page_pool *pool;
+
+	pool = page_pool_create_percpu(params, -1);
+	if (IS_ERR(pool))
+		return pool;
+
+#ifdef CONFIG_PAGE_POOL_STATS
+	pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
+	if (!pool->recycle_stats) {
+		page_pool_uninit(pool);
+		kfree(pool);
+		pool = ERR_PTR(-ENOMEM);
+	}
+#endif
+	return pool;
 }
 EXPORT_SYMBOL(page_pool_create);
 
-- 
2.43.0