Date:   Sun, 30 Apr 2023 10:18:09 -0700
From:   Yury Norov <yury.norov@...il.com>
To:     Jakub Kicinski <kuba@...nel.org>, netdev@...r.kernel.org,
        linux-rdma@...r.kernel.org, linux-kernel@...r.kernel.org
Cc:     Yury Norov <yury.norov@...il.com>,
        Saeed Mahameed <saeedm@...dia.com>,
        Pawel Chmielewski <pawel.chmielewski@...el.com>,
        Leon Romanovsky <leon@...nel.org>,
        "David S. Miller" <davem@...emloft.net>,
        Eric Dumazet <edumazet@...gle.com>,
        Paolo Abeni <pabeni@...hat.com>,
        Andy Shevchenko <andriy.shevchenko@...ux.intel.com>,
        Rasmus Villemoes <linux@...musvillemoes.dk>,
        Ingo Molnar <mingo@...hat.com>,
        Peter Zijlstra <peterz@...radead.org>,
        Juri Lelli <juri.lelli@...hat.com>,
        Vincent Guittot <vincent.guittot@...aro.org>,
        Dietmar Eggemann <dietmar.eggemann@....com>,
        Steven Rostedt <rostedt@...dmis.org>,
        Ben Segall <bsegall@...gle.com>, Mel Gorman <mgorman@...e.de>,
        Daniel Bristot de Oliveira <bristot@...hat.com>,
        Valentin Schneider <vschneid@...hat.com>,
        Tariq Toukan <tariqt@...dia.com>,
        Gal Pressman <gal@...dia.com>,
        Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
        Heiko Carstens <hca@...ux.ibm.com>,
        Barry Song <baohua@...nel.org>
Subject: [PATCH v3 8/8] lib: test for_each_numa_cpu()

Test for_each_numa_cpu() output (see the usage sketch below the list) to
ensure that:
 - all CPUs are picked from NUMA nodes with non-decreasing distances to
   the original node;
 - only online CPUs are enumerated;
 - the macro enumerates each online CPU only once;
 - enumeration order is consistent with cpumask_local_spread().
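
For context, here is a minimal sketch of the intended usage pattern,
mirroring the loop in test_for_each_numa_cpu() below. The
walk_cpus_by_distance() helper is made up for illustration;
for_each_numa_cpu() itself comes from the earlier patches in this series:

  #include <linux/cpumask.h>
  #include <linux/printk.h>
  #include <linux/topology.h>

  /* Visit the CPUs of @mask, nearest NUMA hops from @node first. */
  static void walk_cpus_by_distance(int node, const struct cpumask *mask)
  {
          unsigned int cpu, hop;

          rcu_read_lock();
          for_each_numa_cpu(cpu, hop, node, mask)
                  pr_info("hop %u: cpu %u (node %d)\n",
                          hop, cpu, cpu_to_node(cpu));
          rcu_read_unlock();
  }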

The latter is implementation-defined behavior: if cpumask_local_spread()
or for_each_numa_cpu() changes in the future, the subtest may need to be
adjusted or even removed, as appropriate.

The subtest is useful now because some architectures don't implement
node_distance(), and the generic fallback only distinguishes local and
remote nodes, which doesn't allow testing for_each_numa_cpu() properly.
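
For reference, the generic node_distance() fallback in
include/linux/topology.h is roughly the following, so on those
architectures every remote node reports the same distance:

  /* Generic fallback when the architecture provides no distance table */
  #ifndef node_distance
  #define node_distance(from, to) \
          ((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE)
  #endif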

Suggested-by: Valentin Schneider <vschneid@...hat.com> (for node_distance() test)
Signed-off-by: Yury Norov <yury.norov@...il.com>
---
 lib/test_bitmap.c | 70 +++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 68 insertions(+), 2 deletions(-)

diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index a8005ad3bd58..ac4fe621d37b 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -12,6 +12,7 @@
 #include <linux/printk.h>
 #include <linux/slab.h>
 #include <linux/string.h>
+#include <linux/topology.h>
 #include <linux/uaccess.h>
 
 #include "../tools/testing/selftests/kselftest_module.h"
@@ -71,6 +72,16 @@ __check_eq_uint(const char *srcfile, unsigned int line,
 	return true;
 }
 
+static bool __init
+__check_ge_uint(const char *srcfile, unsigned int line,
+		const unsigned int exp_uint, unsigned int x)
+{
+	if (exp_uint >=  x)
+		return true;
+
+	pr_err("[%s:%u] expected >= %u, got %u\n", srcfile, line, exp_uint, x);
+	return false;
+}
 
 static bool __init
 __check_eq_bitmap(const char *srcfile, unsigned int line,
@@ -86,6 +97,18 @@ __check_eq_bitmap(const char *srcfile, unsigned int line,
 	return true;
 }
 
+static bool __init
+__check_eq_cpumask(const char *srcfile, unsigned int line,
+		  const struct cpumask *exp_cpumask, const struct cpumask *cpumask)
+{
+	if (cpumask_equal(exp_cpumask, cpumask))
+		return true;
+
+	pr_warn("[%s:%u] cpumasks contents differ: expected \"%*pbl\", got \"%*pbl\"\n",
+		srcfile, line, cpumask_pr_args(exp_cpumask), cpumask_pr_args(cpumask));
+	return false;
+}
+
 static bool __init
 __check_eq_pbl(const char *srcfile, unsigned int line,
 	       const char *expected_pbl,
@@ -173,11 +196,11 @@ __check_eq_str(const char *srcfile, unsigned int line,
 	return eq;
 }
 
-#define __expect_eq(suffix, ...)					\
+#define __expect(suffix, ...)						\
 	({								\
 		int result = 0;						\
 		total_tests++;						\
-		if (!__check_eq_ ## suffix(__FILE__, __LINE__,		\
+		if (!__check_ ## suffix(__FILE__, __LINE__,		\
 					   ##__VA_ARGS__)) {		\
 			failed_tests++;					\
 			result = 1;					\
@@ -185,13 +208,19 @@ __check_eq_str(const char *srcfile, unsigned int line,
 		result;							\
 	})
 
+#define __expect_eq(suffix, ...)	__expect(eq_ ## suffix, ##__VA_ARGS__)
+#define __expect_ge(suffix, ...)	__expect(ge_ ## suffix, ##__VA_ARGS__)
+
 #define expect_eq_uint(...)		__expect_eq(uint, ##__VA_ARGS__)
 #define expect_eq_bitmap(...)		__expect_eq(bitmap, ##__VA_ARGS__)
+#define expect_eq_cpumask(...)		__expect_eq(cpumask, ##__VA_ARGS__)
 #define expect_eq_pbl(...)		__expect_eq(pbl, ##__VA_ARGS__)
 #define expect_eq_u32_array(...)	__expect_eq(u32_array, ##__VA_ARGS__)
 #define expect_eq_clump8(...)		__expect_eq(clump8, ##__VA_ARGS__)
 #define expect_eq_str(...)		__expect_eq(str, ##__VA_ARGS__)
 
+#define expect_ge_uint(...)		__expect_ge(uint, ##__VA_ARGS__)
+
 static void __init test_zero_clear(void)
 {
 	DECLARE_BITMAP(bmap, 1024);
@@ -751,6 +780,42 @@ static void __init test_for_each_set_bit_wrap(void)
 	}
 }
 
+static void __init test_for_each_numa_cpu(void)
+{
+	unsigned int node, cpu, hop;
+	cpumask_var_t mask;
+
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+		pr_err("Can't allocate cpumask. Skipping for_each_numa_cpu() test\n");
+		return;
+	}
+
+	for_each_node(node) {
+		unsigned int c = 0, dist, old_dist = node_distance(node, node);
+
+		cpumask_clear(mask);
+
+		rcu_read_lock();
+		for_each_numa_cpu(cpu, hop, node, cpu_possible_mask) {
+			dist = node_distance(cpu_to_node(cpu), node);
+
+			/* Distance between nodes must never decrease */
+			expect_ge_uint(dist, old_dist);
+
+			/* Test for coherence with cpumask_local_spread() */
+			expect_eq_uint(cpumask_local_spread(c++, node), cpu);
+
+			cpumask_set_cpu(cpu, mask);
+			old_dist = dist;
+		}
+		rcu_read_unlock();
+
+		/* Each online CPU must be visited exactly once */
+		expect_eq_uint(c, num_online_cpus());
+		expect_eq_cpumask(mask, cpu_online_mask);
+	}
+}
+
 static void __init test_for_each_set_bit(void)
 {
 	DECLARE_BITMAP(orig, 500);
@@ -1237,6 +1302,7 @@ static void __init selftest(void)
 	test_for_each_clear_bitrange_from();
 	test_for_each_set_clump8();
 	test_for_each_set_bit_wrap();
+	test_for_each_numa_cpu();
 }
 
 KSTM_MODULE_LOADERS(test_bitmap);
-- 
2.37.2
