Message-ID: <20210412214941.krhkkp7ryx3nf77m@kafai-mbp.dhcp.thefacebook.com>
Date: Mon, 12 Apr 2021 14:49:41 -0700
From: Martin KaFai Lau <kafai@...com>
To: Pedro Tammela <pctammela@...il.com>
CC: Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Andrii Nakryiko <andrii@...nel.org>,
Song Liu <songliubraving@...com>, Yonghong Song <yhs@...com>,
John Fastabend <john.fastabend@...il.com>,
KP Singh <kpsingh@...nel.org>, Shuah Khan <shuah@...nel.org>,
Pedro Tammela <pctammela@...atatu.com>,
Matthieu Baerts <matthieu.baerts@...sares.net>,
David Verbeiren <david.verbeiren@...sares.net>,
"open list:BPF (Safe dynamic programs and tools)"
<netdev@...r.kernel.org>,
"open list:BPF (Safe dynamic programs and tools)"
<bpf@...r.kernel.org>, open list <linux-kernel@...r.kernel.org>,
"open list:KERNEL SELFTEST FRAMEWORK"
<linux-kselftest@...r.kernel.org>
Subject: Re: [PATCH bpf-next v3 3/3] bpf: selftests: update array map tests
for per-cpu batched ops
On Mon, Apr 12, 2021 at 04:40:01PM -0300, Pedro Tammela wrote:
> Follows the same logic as the hashtable tests.
>
> Signed-off-by: Pedro Tammela <pctammela@...atatu.com>
> ---
> .../bpf/map_tests/array_map_batch_ops.c | 110 +++++++++++++-----
> 1 file changed, 80 insertions(+), 30 deletions(-)
>
> diff --git a/tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c b/tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c
> index e42ea1195d18..707d17414dee 100644
> --- a/tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c
> +++ b/tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c
> @@ -10,32 +10,59 @@
> #include <test_maps.h>
>
> static void map_batch_update(int map_fd, __u32 max_entries, int *keys,
> - int *values)
> + __s64 *values, bool is_pcpu)
> {
> - int i, err;
> + int nr_cpus = libbpf_num_possible_cpus();
Instead of getting it multiple times, how about moving it out to
a static global and initializing it in test_array_map_batch_ops()?
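Something like the below (untested sketch, just to illustrate the idea):

	static int nr_cpus;

	void test_array_map_batch_ops(void)
	{
		nr_cpus = libbpf_num_possible_cpus();
		CHECK(nr_cpus < 0, "nr_cpus checking",
		      "error: get possible cpus failed");

		array_map_batch_ops();
		array_percpu_map_batch_ops();
	}

Then map_batch_update()/map_batch_verify()/__test_map_lookup_and_update_batch()
can use nr_cpus directly instead of each calling libbpf_num_possible_cpus()
and re-checking the result.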
> + int i, j, err;
> + int offset = 0;
> DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
> .elem_flags = 0,
> .flags = 0,
> );
>
> + CHECK(nr_cpus < 0, "nr_cpus checking",
> + "error: get possible cpus failed");
> +
> for (i = 0; i < max_entries; i++) {
> keys[i] = i;
> - values[i] = i + 1;
> + if (is_pcpu)
> + for (j = 0; j < nr_cpus; j++)
> + (values + offset)[j] = i + 1 + j;
> + else
> + values[i] = i + 1;
> + offset += nr_cpus;
This "offset" update here is confusing to read because it is only
used in the is_pcpu case but it always gets updated regardless.
How about only defines and uses offset in the "if (is_pcpu)" case and
rename it to "cpu_offset": cpu_offset = i * nr_cpus.
The same goes for other occasions.
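e.g. something along these lines (untested, just to show the shape):

	for (i = 0; i < max_entries; i++) {
		keys[i] = i;
		if (is_pcpu) {
			int cpu_offset = i * nr_cpus;

			for (j = 0; j < nr_cpus; j++)
				values[cpu_offset + j] = i + 1 + j;
		} else {
			values[i] = i + 1;
		}
	}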
> }
>
> err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &opts);
> CHECK(err, "bpf_map_update_batch()", "error:%s\n", strerror(errno));
> }
>
> -static void map_batch_verify(int *visited, __u32 max_entries,
> - int *keys, int *values)
> +static void map_batch_verify(int *visited, __u32 max_entries, int *keys,
> + __s64 *values, bool is_pcpu)
> {
> - int i;
> + int nr_cpus = libbpf_num_possible_cpus();
> + int i, j;
> + int offset = 0;
> +
> + CHECK(nr_cpus < 0, "nr_cpus checking",
> + "error: get possible cpus failed");
>
> memset(visited, 0, max_entries * sizeof(*visited));
> for (i = 0; i < max_entries; i++) {
> - CHECK(keys[i] + 1 != values[i], "key/value checking",
> - "error: i %d key %d value %d\n", i, keys[i], values[i]);
> + if (is_pcpu) {
> + for (j = 0; j < nr_cpus; j++) {
> + __s64 value = (values + offset)[j];
> + CHECK(keys[i] + j + 1 != value,
> + "key/value checking",
> + "error: i %d j %d key %d value %d\n", i,
> + j, keys[i], value);
> + }
> + } else {
> + CHECK(keys[i] + 1 != values[i], "key/value checking",
> + "error: i %d key %d value %d\n", i, keys[i],
> + values[i]);
> + }
> + offset += nr_cpus;
> visited[i] = 1;
> }
> for (i = 0; i < max_entries; i++) {
> @@ -44,45 +71,52 @@ static void map_batch_verify(int *visited, __u32 max_entries,
> }
> }
>
> -void test_array_map_batch_ops(void)
> +void __test_map_lookup_and_update_batch(bool is_pcpu)
static
> {
> + int nr_cpus = libbpf_num_possible_cpus();
> struct bpf_create_map_attr xattr = {
> .name = "array_map",
> - .map_type = BPF_MAP_TYPE_ARRAY,
> + .map_type = is_pcpu ? BPF_MAP_TYPE_PERCPU_ARRAY :
> + BPF_MAP_TYPE_ARRAY,
> .key_size = sizeof(int),
> - .value_size = sizeof(int),
> + .value_size = sizeof(__s64),
> };
> - int map_fd, *keys, *values, *visited;
> + int map_fd, *keys, *visited;
> __u32 count, total, total_success;
> const __u32 max_entries = 10;
> __u64 batch = 0;
> - int err, step;
> + int err, step, value_size;
> + void *values;
> DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
> .elem_flags = 0,
> .flags = 0,
> );
>
> + CHECK(nr_cpus < 0, "nr_cpus checking",
> + "error: get possible cpus failed");
> +
> xattr.max_entries = max_entries;
> map_fd = bpf_create_map_xattr(&xattr);
> CHECK(map_fd == -1,
> "bpf_create_map_xattr()", "error:%s\n", strerror(errno));
>
> - keys = malloc(max_entries * sizeof(int));
> - values = malloc(max_entries * sizeof(int));
> - visited = malloc(max_entries * sizeof(int));
> + value_size = sizeof(__s64);
> + if (is_pcpu)
> + value_size *= nr_cpus;
> +
> + keys = malloc(max_entries * sizeof(*keys));
> + values = calloc(max_entries, value_size);
Why does only this one use calloc?
> + visited = malloc(max_entries * sizeof(*visited));
> CHECK(!keys || !values || !visited, "malloc()", "error:%s\n",
> strerror(errno));
>
> - /* populate elements to the map */
> - map_batch_update(map_fd, max_entries, keys, values);
> -
> /* test 1: lookup in a loop with various steps. */
> total_success = 0;
> for (step = 1; step < max_entries; step++) {
> - map_batch_update(map_fd, max_entries, keys, values);
> - map_batch_verify(visited, max_entries, keys, values);
> + map_batch_update(map_fd, max_entries, keys, values, is_pcpu);
> + map_batch_verify(visited, max_entries, keys, values, is_pcpu);
> memset(keys, 0, max_entries * sizeof(*keys));
> - memset(values, 0, max_entries * sizeof(*values));
> + memset(values, 0, max_entries * value_size);
> batch = 0;
> total = 0;
> /* iteratively lookup/delete elements with 'step'
> @@ -91,10 +125,10 @@ void test_array_map_batch_ops(void)
> count = step;
> while (true) {
> err = bpf_map_lookup_batch(map_fd,
> - total ? &batch : NULL, &batch,
> - keys + total,
> - values + total,
> - &count, &opts);
> + total ? &batch : NULL,
> + &batch, keys + total,
> + values + total * value_size,
> + &count, &opts);
>
> CHECK((err && errno != ENOENT), "lookup with steps",
> "error: %s\n", strerror(errno));
> @@ -108,7 +142,7 @@ void test_array_map_batch_ops(void)
> CHECK(total != max_entries, "lookup with steps",
> "total = %u, max_entries = %u\n", total, max_entries);
>
> - map_batch_verify(visited, max_entries, keys, values);
> + map_batch_verify(visited, max_entries, keys, values, is_pcpu);
>
> total_success++;
> }
> @@ -116,9 +150,25 @@ void test_array_map_batch_ops(void)
> CHECK(total_success == 0, "check total_success",
> "unexpected failure\n");
>
> - printf("%s:PASS\n", __func__);
> -
> free(keys);
> - free(values);
> free(visited);
> + free(values);
This re-ordering is unnecessary.
> +}
> +
> +void array_map_batch_ops(void)
static
> +{
> + __test_map_lookup_and_update_batch(false);
> + printf("test_%s:PASS\n", __func__);
> +}
> +
> +void array_percpu_map_batch_ops(void)
static
> +{
> + __test_map_lookup_and_update_batch(true);
> + printf("test_%s:PASS\n", __func__);
> +}
> +
> +void test_array_map_batch_ops(void)
> +{
> + array_map_batch_ops();
> + array_percpu_map_batch_ops();
> }
> --
> 2.25.1
>