Message-ID: <051f7239-a53e-0683-a2df-5fa66912128c@fb.com>
Date: Fri, 13 Dec 2019 18:34:22 +0000
From: Yonghong Song <yhs@...com>
To: Brian Vazquez <brianvv@...gle.com>,
Brian Vazquez <brianvv.kernel@...il.com>,
Alexei Starovoitov <ast@...nel.org>,
"Daniel Borkmann" <daniel@...earbox.net>,
"David S . Miller" <davem@...emloft.net>
CC: Stanislav Fomichev <sdf@...gle.com>,
Petar Penkov <ppenkov@...gle.com>,
Willem de Bruijn <willemb@...gle.com>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"netdev@...r.kernel.org" <netdev@...r.kernel.org>,
"bpf@...r.kernel.org" <bpf@...r.kernel.org>
Subject: Re: [PATCH v3 bpf-next 09/11] selftests/bpf: add batch ops testing
for htab and htab_percpu map
On 12/11/19 2:33 PM, Brian Vazquez wrote:
> From: Yonghong Song <yhs@...com>
>
> Tested bpf_map_lookup_batch(), bpf_map_lookup_and_delete_batch(),
> bpf_map_update_batch(), and bpf_map_delete_batch() functionality.
> $ ./test_maps
> ...
> test_htab_map_batch_ops:PASS
> test_htab_percpu_map_batch_ops:PASS
> ...
>
> Signed-off-by: Yonghong Song <yhs@...com>
> Signed-off-by: Brian Vazquez <brianvv@...gle.com>
> ---
> .../bpf/map_tests/htab_map_batch_ops.c | 269 ++++++++++++++++++
> 1 file changed, 269 insertions(+)
> create mode 100644 tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c
>
> diff --git a/tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c b/tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c
> new file mode 100644
> index 0000000000000..dabc4d420a10e
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c
> @@ -0,0 +1,269 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/* Copyright (c) 2019 Facebook */
> +#include <stdio.h>
> +#include <errno.h>
> +#include <string.h>
> +
> +#include <bpf/bpf.h>
> +#include <bpf/libbpf.h>
> +
> +#include <bpf_util.h>
> +#include <test_maps.h>
> +
> +static void map_batch_update(int map_fd, __u32 max_entries, int *keys,
> + void *values, bool is_pcpu)
> +{
> + typedef BPF_DECLARE_PERCPU(int, value);
> + int i, j, err;
> + value *v;
> +
> + if (is_pcpu)
> + v = (value *)values;
> +
> + for (i = 0; i < max_entries; i++) {
> + keys[i] = i + 1;
> + if (is_pcpu)
> + for (j = 0; j < bpf_num_possible_cpus(); j++)
> + bpf_percpu(v[i], j) = i + 2 + j;
> + else
> + ((int *)values)[i] = i + 2;
> + }
> +
> + err = bpf_map_update_batch(map_fd, keys, values, &max_entries, 0, 0);
> + CHECK(err, "bpf_map_update_batch()", "error:%s\n", strerror(errno));
> +}
> +
> +static void map_batch_verify(int *visited, __u32 max_entries,
> + int *keys, void *values, bool is_pcpu)
> +{
> + typedef BPF_DECLARE_PERCPU(int, value);
> + value *v;
> + int i, j;
> +
> + if (is_pcpu)
> + v = (value *)values;
> +
> + memset(visited, 0, max_entries * sizeof(*visited));
> + for (i = 0; i < max_entries; i++) {
> +
> + if (is_pcpu) {
> + for (j = 0; j < bpf_num_possible_cpus(); j++) {
> + CHECK(keys[i] + 1 + j != bpf_percpu(v[i], j),
> + "key/value checking",
> + "error: i %d j %d key %d value %d\n",
> + i, j, keys[i], bpf_percpu(v[i], j));
> + }
> + } else {
> + CHECK(keys[i] + 1 != ((int *)values)[i],
> + "key/value checking",
> + "error: i %d key %d value %d\n", i, keys[i],
> + ((int *)values)[i]);
> + }
> +
> + visited[i] = 1;
> +
> + }
> + for (i = 0; i < max_entries; i++) {
> + CHECK(visited[i] != 1, "visited checking",
> + "error: keys array at index %d missing\n", i);
> + }
> +}
> +
> +void __test_map_lookup_and_delete_batch(bool is_pcpu)
> +{
> + int map_type = is_pcpu ? BPF_MAP_TYPE_PERCPU_HASH : BPF_MAP_TYPE_HASH;
> + struct bpf_create_map_attr xattr = {
> + .name = "hash_map",
> + .map_type = map_type,
> + .key_size = sizeof(int),
> + .value_size = sizeof(int),
> + };
> + __u32 batch, count, total, total_success;
> + typedef BPF_DECLARE_PERCPU(int, value);
> + int map_fd, *keys, *visited, key;
> + const __u32 max_entries = 10;
> + int err, step, value_size;
> + value pcpu_values[10];
> + bool nospace_err;
> + void *values;
> +
> + xattr.max_entries = max_entries;
> + map_fd = bpf_create_map_xattr(&xattr);
> + CHECK(map_fd == -1,
> + "bpf_create_map_xattr()", "error:%s\n", strerror(errno));
> +
> + value_size = is_pcpu ? sizeof(value) : sizeof(int);
> + keys = malloc(max_entries * sizeof(int));
> + if (is_pcpu)
> + values = pcpu_values;
> + else
> + values = malloc(max_entries * sizeof(int));
> + visited = malloc(max_entries * sizeof(int));
> + CHECK(!keys || !values || !visited, "malloc()",
> + "error:%s\n", strerror(errno));
Sorry, I missed it earlier: the malloc'ed keys/visited memory regions
should be freed at the end of the test. The same applies to the other tests.
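Something along these lines at the end of
__test_map_lookup_and_delete_batch() is what I have in mind (just a
sketch, assuming no other cleanup path owns these buffers; note that
pcpu_values is a stack array, so only the non-percpu values buffer was
malloc'ed):

	/* hypothetical cleanup at the end of the test */
	free(keys);
	free(visited);
	if (!is_pcpu)
		free(values);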
> +
> + /* test 1: lookup/delete an empty hash table, -ENOENT */
> + count = max_entries;
> + err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys,
> + values, &count, 0, 0);
> + CHECK((err && errno != ENOENT), "empty map",
> + "error: %s\n", strerror(errno));
> +
> + /* populate elements to the map */
> + map_batch_update(map_fd, max_entries, keys, values, is_pcpu);
> +
> + /* test 2: lookup/delete with count = 0, success */
> + count = 0;
> + err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys,
> + values, &count, 0, 0);
> + CHECK(err, "count = 0", "error: %s\n", strerror(errno));
> +
> + /* test 3: lookup/delete with count = max_entries, success */
> + memset(keys, 0, max_entries * sizeof(*keys));
> + memset(values, 0, max_entries * value_size);
> + count = max_entries;
> + err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys,
> + values, &count, 0, 0);
> + CHECK((err && errno != ENOENT), "count = max_entries",
> + "error: %s\n", strerror(errno));
> + CHECK(count != max_entries, "count = max_entries",
> + "count = %u, max_entries = %u\n", count, max_entries);
> + map_batch_verify(visited, max_entries, keys, values, is_pcpu);
> +
> + /* bpf_map_get_next_key() should return -ENOENT for an empty map. */
> + err = bpf_map_get_next_key(map_fd, NULL, &key);
> + CHECK(!err, "bpf_map_get_next_key()", "error: %s\n", strerror(errno));
> +
[...]