Message-ID: <20260119133417.19739-1-leon.hwang@linux.dev>
Date: Mon, 19 Jan 2026 21:34:17 +0800
From: Leon Hwang <leon.hwang@...ux.dev>
To: bpf@...r.kernel.org
Cc: Alexei Starovoitov <ast@...nel.org>,
	Daniel Borkmann <daniel@...earbox.net>,
	Andrii Nakryiko <andrii@...nel.org>,
	Martin KaFai Lau <martin.lau@...ux.dev>,
	Eduard Zingerman <eddyz87@...il.com>,
	Song Liu <song@...nel.org>,
	Yonghong Song <yonghong.song@...ux.dev>,
	John Fastabend <john.fastabend@...il.com>,
	KP Singh <kpsingh@...nel.org>,
	Stanislav Fomichev <sdf@...ichev.me>,
	Hao Luo <haoluo@...gle.com>,
	Jiri Olsa <jolsa@...nel.org>,
	Shuah Khan <shuah@...nel.org>,
	Leon Hwang <leon.hwang@...ux.dev>,
	linux-kselftest@...r.kernel.org,
	linux-kernel@...r.kernel.org,
	kernel-patches-bot@...com
Subject: [PATCH bpf-next] selftests/bpf: Harden cpu flags test for lru_percpu_hash map

CI occasionally reports failures in the
percpu_alloc/cpu_flag_lru_percpu_hash selftest, for example:

 First test_progs failure (test_progs_no_alu32-x86_64-llvm-21):
 #264/15 percpu_alloc/cpu_flag_lru_percpu_hash
 ...
 test_percpu_map_op_cpu_flag:FAIL:bpf_map_lookup_batch value on specified cpu unexpected bpf_map_lookup_batch value on specified cpu: actual 0 != expected 3735929054

The unexpected value (0 instead of the expected 3735929054, i.e.
0xdeadc0de) indicates that an element was removed from the map.
However, the test never calls delete_elem(), so the only possible cause
is LRU eviction.

This can happen when the current task migrates to another CPU: an
update_elem() then triggers eviction because no free LRU node is
available in either the local or the global freelist.
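
To illustrate the tight sizing, here is a minimal libbpf userspace
sketch (purely illustrative, not part of this patch; the map name,
value pattern, and error handling are made up) that creates the same
map type with the old headroom of a single spare element:

#include <stdio.h>
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

int main(void)
{
	int nr_cpus = libbpf_num_possible_cpus();
	unsigned long long *values;
	int map_fd, key, err;

	if (nr_cpus <= 0)
		return 1;

	/* Per-cpu maps take one value per possible CPU on update. */
	values = calloc(nr_cpus, sizeof(*values));
	if (!values)
		return 1;
	for (key = 0; key < nr_cpus; key++)
		values[key] = 0xdeadc0de;

	/* nr_cpus + 1 mirrors the old test sizing: a single spare
	 * LRU node shared by all CPUs.
	 */
	map_fd = bpf_map_create(BPF_MAP_TYPE_LRU_PERCPU_HASH, "lru_demo",
				sizeof(key), sizeof(*values), nr_cpus + 1,
				NULL);
	if (map_fd < 0) {
		free(values);
		return 1;
	}

	/* If this task migrates between CPUs while filling the map,
	 * an update can find both the local and the global freelist
	 * empty and evict a live element instead.
	 */
	for (key = 0; key < nr_cpus; key++) {
		err = bpf_map_update_elem(map_fd, &key, values, BPF_ANY);
		if (err)
			fprintf(stderr, "update key %d: %d\n", key, err);
	}

	free(values);
	return 0;
}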

Harden the test against this behavior by provisioning sufficient spare
elements: set max_entries to 'nr_cpus * 2' and restrict the test to the
first nr_cpus entries, so that updates never spuriously trigger LRU
eviction.
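
As a concrete illustration of the new sizing: on a machine with 8
possible CPUs the map is now created with max_entries = 16 while the
test only ever touches keys 0..7, leaving 8 untouched LRU nodes as
headroom. Even if a batched update runs on a CPU whose local freelist
is empty, a free node can still be obtained instead of evicting a live
element.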

Signed-off-by: Leon Hwang <leon.hwang@...ux.dev>
---
 .../testing/selftests/bpf/prog_tests/percpu_alloc.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/tools/testing/selftests/bpf/prog_tests/percpu_alloc.c b/tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
index c1d0949f093f..a72ae0b29f6e 100644
--- a/tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
+++ b/tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
@@ -236,6 +236,8 @@ static void test_percpu_map_op_cpu_flag(struct bpf_map *map, void *keys, size_t
 		err = bpf_map_update_batch(map_fd, keys, values, &count, &batch_opts);
 		if (!ASSERT_OK(err, "bpf_map_update_batch all_cpus"))
 			goto out;
+		if (!ASSERT_EQ(count, entries, "bpf_map_update_batch count"))
+			goto out;
 
 		/* update values on specified CPU */
 		for (i = 0; i < entries; i++)
@@ -246,6 +248,8 @@ static void test_percpu_map_op_cpu_flag(struct bpf_map *map, void *keys, size_t
 		err = bpf_map_update_batch(map_fd, keys, values, &count, &batch_opts);
 		if (!ASSERT_OK(err, "bpf_map_update_batch specified cpu"))
 			goto out;
+		if (!ASSERT_EQ(count, entries, "bpf_map_update_batch count"))
+			goto out;
 
 		/* lookup values on specified CPU */
 		batch = 0;
@@ -254,6 +258,8 @@ static void test_percpu_map_op_cpu_flag(struct bpf_map *map, void *keys, size_t
 		err = bpf_map_lookup_batch(map_fd, NULL, &batch, keys, values, &count, &batch_opts);
 		if (!ASSERT_TRUE(!err || err == -ENOENT, "bpf_map_lookup_batch specified cpu"))
 			goto out;
+		if (!ASSERT_EQ(count, entries, "bpf_map_lookup_batch count"))
+			goto out;
 
 		for (i = 0; i < entries; i++)
 			if (!ASSERT_EQ(values[i], value,
@@ -269,6 +275,8 @@ static void test_percpu_map_op_cpu_flag(struct bpf_map *map, void *keys, size_t
 					   &batch_opts);
 		if (!ASSERT_TRUE(!err || err == -ENOENT, "bpf_map_lookup_batch all_cpus"))
 			goto out;
+		if (!ASSERT_EQ(count, entries, "bpf_map_lookup_batch count"))
+			goto out;
 
 		for (i = 0; i < entries; i++) {
 			values_row = (void *) values_percpu +
@@ -287,7 +295,6 @@ static void test_percpu_map_op_cpu_flag(struct bpf_map *map, void *keys, size_t
 	free(values);
 }
 
-
 static void test_percpu_map_cpu_flag(enum bpf_map_type map_type)
 {
 	struct percpu_alloc_array *skel;
@@ -300,7 +307,7 @@ static void test_percpu_map_cpu_flag(enum bpf_map_type map_type)
 	if (!ASSERT_GT(nr_cpus, 0, "libbpf_num_possible_cpus"))
 		return;
 
-	max_entries = nr_cpus + 1;
+	max_entries = nr_cpus * 2;
 	keys = calloc(max_entries, key_sz);
 	if (!ASSERT_OK_PTR(keys, "calloc keys"))
 		return;
@@ -322,7 +329,7 @@ static void test_percpu_map_cpu_flag(enum bpf_map_type map_type)
 	if (!ASSERT_OK(err, "test_percpu_alloc__load"))
 		goto out;
 
-	test_percpu_map_op_cpu_flag(map, keys, key_sz, max_entries - 1, nr_cpus, true);
+	test_percpu_map_op_cpu_flag(map, keys, key_sz, nr_cpus, nr_cpus, true);
 out:
 	percpu_alloc_array__destroy(skel);
 	free(keys);
-- 
2.52.0

