Message-ID: <CAADnVQLARr69Qv9EfwWkpudXLZNb21zYd86aPux_Fv3UAsrLGw@mail.gmail.com>
Date: Fri, 21 Nov 2025 18:34:23 -0800
From: Alexei Starovoitov <alexei.starovoitov@...il.com>
To: Leon Hwang <leon.hwang@...ux.dev>
Cc: bpf <bpf@...r.kernel.org>, Alexei Starovoitov <ast@...nel.org>,
Andrii Nakryiko <andrii@...nel.org>, Daniel Borkmann <daniel@...earbox.net>, Jiri Olsa <jolsa@...nel.org>,
Yonghong Song <yonghong.song@...ux.dev>, Song Liu <song@...nel.org>, Eduard <eddyz87@...il.com>,
Daniel Xu <dxu@...uu.xyz>, Daniel Müller <deso@...teo.net>,
Martin KaFai Lau <martin.lau@...ux.dev>, John Fastabend <john.fastabend@...il.com>,
KP Singh <kpsingh@...nel.org>, Stanislav Fomichev <sdf@...ichev.me>, Hao Luo <haoluo@...gle.com>,
Shuah Khan <shuah@...nel.org>, Jason Xing <kerneljasonxing@...il.com>,
Tao Chen <chen.dylane@...ux.dev>, Willem de Bruijn <willemb@...gle.com>,
Paul Chaignon <paul.chaignon@...il.com>, Anton Protopopov <a.s.protopopov@...il.com>,
Kumar Kartikeya Dwivedi <memxor@...il.com>, Mykyta Yatsenko <yatsenko@...a.com>,
Tobias Klauser <tklauser@...tanz.ch>, kernel-patches-bot@...com,
LKML <linux-kernel@...r.kernel.org>,
"open list:KERNEL SELFTEST FRAMEWORK" <linux-kselftest@...r.kernel.org>
Subject: Re: [PATCH bpf-next v10 8/8] selftests/bpf: Add cases to test
BPF_F_CPU and BPF_F_ALL_CPUS flags
On Mon, Nov 17, 2025 at 8:22 AM Leon Hwang <leon.hwang@...ux.dev> wrote:
>
> Add test coverage for the new BPF_F_CPU and BPF_F_ALL_CPUS flags support
> in percpu maps. The following APIs are exercised:
>
> * bpf_map_update_batch()
> * bpf_map_lookup_batch()
> * bpf_map_update_elem()
> * bpf_map__update_elem()
> * bpf_map_lookup_elem_flags()
> * bpf_map__lookup_elem()
>
> Add tests to verify that array and hash maps do not support BPF_F_CPU
> and BPF_F_ALL_CPUS flags.
>
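To restate the flag semantics these tests exercise, as I read the series
(map_fd, key and val below are invented for illustration): BPF_F_ALL_CPUS
updates every CPU's slot from a single value-sized buffer, while BPF_F_CPU
carries the target CPU in the upper 32 bits of the flags and makes both
update and lookup operate on one value-sized slot:

  int err;
  __u32 key = 0, val = 0xDEADC0DE; /* one value, not nr_cpus values */
  __u64 flags;

  /* write the same value to every CPU's slot in one call */
  err = bpf_map_update_elem(map_fd, &key, &val, BPF_F_ALL_CPUS);

  /* touch only CPU 2's slot; the cpu number sits in flags >> 32 */
  flags = (__u64)2 << 32 | BPF_F_CPU;
  err = bpf_map_update_elem(map_fd, &key, &val, flags);
  err = bpf_map_lookup_elem_flags(map_fd, &key, &val, flags);
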
> Signed-off-by: Leon Hwang <leon.hwang@...ux.dev>
> ---
> .../selftests/bpf/prog_tests/percpu_alloc.c | 312 ++++++++++++++++++
> .../selftests/bpf/progs/percpu_alloc_array.c | 32 ++
> 2 files changed, 344 insertions(+)
>
> diff --git a/tools/testing/selftests/bpf/prog_tests/percpu_alloc.c b/tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
> index 343da65864d6d..b9c3f5f6cd9c3 100644
> --- a/tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
> +++ b/tools/testing/selftests/bpf/prog_tests/percpu_alloc.c
> @@ -1,5 +1,6 @@
> // SPDX-License-Identifier: GPL-2.0
> #include <test_progs.h>
> +#include "cgroup_helpers.h"
> #include "percpu_alloc_array.skel.h"
> #include "percpu_alloc_cgrp_local_storage.skel.h"
> #include "percpu_alloc_fail.skel.h"
> @@ -115,6 +116,305 @@ static void test_failure(void) {
> RUN_TESTS(percpu_alloc_fail);
> }
>
> +static void test_percpu_map_op_cpu_flag(struct bpf_map *map, void *keys, size_t key_sz,
> + u32 max_entries, bool test_batch)
> +{
> + size_t value_sz = sizeof(u32), value_sz_cpus, value_sz_total;
> + u32 *values = NULL, *values_percpu = NULL;
> + int i, j, cpu, map_fd, nr_cpus, err;
> + const u32 value = 0xDEADC0DE;
> + u32 count = max_entries, v;
> + u64 batch = 0, flags;
> + void *values_row;
> + LIBBPF_OPTS(bpf_map_batch_opts, batch_opts);
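
For anyone skimming: the batch half of the test (further down, not quoted
here) feeds the same flags through these opts. Roughly, assuming elem_flags
is how the series applies BPF_F_ALL_CPUS to each element op:

  LIBBPF_OPTS(bpf_map_batch_opts, opts,
	  .elem_flags = BPF_F_ALL_CPUS,
  );
  err = bpf_map_update_batch(map_fd, keys, values, &count, &opts);
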
> +
> + nr_cpus = libbpf_num_possible_cpus();
> + if (!ASSERT_GT(nr_cpus, 0, "libbpf_num_possible_cpus"))
> + return;
> +
> + value_sz_cpus = value_sz * nr_cpus;
> + values = calloc(max_entries, value_sz_cpus);
> + if (!ASSERT_OK_PTR(values, "calloc values"))
> + return;
> +
> + values_percpu = calloc(max_entries, roundup(value_sz, 8) * nr_cpus);
> + if (!ASSERT_OK_PTR(values_percpu, "calloc values_percpu")) {
> + free(values);
> + return;
> + }
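
Side note: the roundup() above is the pre-existing percpu ABI, where a
flag-less lookup fills one 8-byte-aligned slot per possible CPU, e.g.
(key and vals invented):

  __u32 key = 0;
  __u64 vals[nr_cpus]; /* roundup(sizeof(__u32), 8) bytes per CPU */
  err = bpf_map_lookup_elem(map_fd, &key, vals);
  /* each CPU's u32 value lands at the start of its 8-byte slot */
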
> +
> + value_sz_total = value_sz_cpus * max_entries;
> + memset(values, 0, value_sz_total);
> +
> + map_fd = bpf_map__fd(map);
> + flags = BPF_F_CPU | BPF_F_ALL_CPUS;
> + err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
> + if (!ASSERT_ERR(err, "bpf_map_lookup_elem_flags cpu|all_cpus"))
> + goto out;
> +
> + err = bpf_map_update_elem(map_fd, keys, values, flags);
> + if (!ASSERT_ERR(err, "bpf_map_update_elem cpu|all_cpus"))
> + goto out;
> +
> + flags = BPF_F_ALL_CPUS;
> + err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
> + if (!ASSERT_ERR(err, "bpf_map_lookup_elem_flags all_cpus"))
> + goto out;
> +
> + flags = BPF_F_LOCK | BPF_F_CPU;
> + err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
> + if (!ASSERT_ERR(err, "bpf_map_lookup_elem_flags BPF_F_LOCK"))
> + goto out;
> +
> + flags = BPF_F_LOCK | BPF_F_ALL_CPUS;
> + err = bpf_map_update_elem(map_fd, keys, values, flags);
> + if (!ASSERT_ERR(err, "bpf_map_update_elem BPF_F_LOCK"))
> + goto out;
> +
> + flags = (u64)nr_cpus << 32 | BPF_F_CPU;
> + err = bpf_map_update_elem(map_fd, keys, values, flags);
> + if (!ASSERT_EQ(err, -ERANGE, "bpf_map_update_elem -ERANGE"))
> + goto out;
> +
> + err = bpf_map__update_elem(map, keys, key_sz, values, value_sz, flags);
> + if (!ASSERT_EQ(err, -ERANGE, "bpf_map__update_elem -ERANGE"))
> + goto out;
> +
> + err = bpf_map_lookup_elem_flags(map_fd, keys, values, flags);
> + if (!ASSERT_EQ(err, -ERANGE, "bpf_map_lookup_elem_flags -ERANGE"))
> + goto out;
> +
> + err = bpf_map__lookup_elem(map, keys, key_sz, values, value_sz, flags);
> + if (!ASSERT_EQ(err, -ERANGE, "bpf_map__lookup_elem -ERANGE"))
> + goto out;
> +
> + for (cpu = 0; cpu < nr_cpus; cpu++) {
> + /* clear value on all cpus */
> + values[0] = 0;
> + flags = BPF_F_ALL_CPUS;
> + for (i = 0; i < max_entries; i++) {
> + err = bpf_map__update_elem(map, keys + i * key_sz, key_sz, values,
> + value_sz, flags);
> + if (!ASSERT_OK(err, "bpf_map__update_elem all_cpus"))
> + goto out;
> + }
> +
> + /* update value on specified cpu */
> + for (i = 0; i < max_entries; i++) {
> + values[0] = value;
> + flags = (u64)cpu << 32 | BPF_F_CPU;
> + err = bpf_map__update_elem(map, keys + i * key_sz, key_sz, values,
> + value_sz, flags);
> + if (!ASSERT_OK(err, "bpf_map__update_elem specified cpu"))
> + goto out;
> +
> + /* lookup then check value on CPUs */
> + for (j = 0; j < nr_cpus; j++) {
> + flags = (u64)j << 32 | BPF_F_CPU;
> + err = bpf_map__lookup_elem(map, keys + i * key_sz, key_sz, values,
> + value_sz, flags);
> + if (!ASSERT_OK(err, "bpf_map__lookup_elem specified cpu"))
> + goto out;
> + if (!ASSERT_EQ(values[0], j != cpu ? 0 : value,
> + "bpf_map__lookup_elem value on specified cpu"))
> + goto out;
I was about to apply it, but noticed that the test is unstable.
It fails 1 out of 10 runs for me at the line above:
test_percpu_map_op_cpu_flag:PASS:bpf_map_lookup_batch value on specified cpu 0 nsec
test_percpu_map_op_cpu_flag:FAIL:bpf_map_lookup_batch value on specified cpu unexpected bpf_map_lookup_batch value on specified cpu: actual 0 != expected 3735929054
#261/15 percpu_alloc/cpu_flag_lru_percpu_hash:FAIL
#261 percpu_alloc:FAIL
Please investigate what is going on.
pw-bot: cr