Message-ID: <CAPhsuW5yA_zx7iyy-k+048S5a=1QiM=SBKDKhg=sGBVVUb0wvA@mail.gmail.com>
Date: Fri, 25 Feb 2022 22:11:35 -0800
From: Song Liu <song@...nel.org>
To: Stijn Tintel <stijn@...ux-ipv6.be>
Cc: bpf <bpf@...r.kernel.org>, Song Liu <songliubraving@...com>,
Andrii Nakryiko <andrii@...nel.org>,
Toke Høiland-Jørgensen <toke@...hat.com>,
Networking <netdev@...r.kernel.org>,
open list <linux-kernel@...r.kernel.org>,
KP Singh <kpsingh@...nel.org>, Yonghong Song <yhs@...com>,
Martin KaFai Lau <kafai@...com>,
Daniel Borkmann <daniel@...earbox.net>,
Alexei Starovoitov <ast@...nel.org>
Subject: Re: [PATCH v2] libbpf: fix BPF_MAP_TYPE_PERF_EVENT_ARRAY auto-pinning
On Fri, Feb 25, 2022 at 7:24 AM Stijn Tintel <stijn@...ux-ipv6.be> wrote:
>
> When a BPF map of type BPF_MAP_TYPE_PERF_EVENT_ARRAY doesn't have the
> max_entries parameter set, the map will be created with max_entries set
> to the number of possible CPUs. When we try to reuse such a pinned map,
> map_is_reuse_compat will return false, as max_entries in the map
> definition differs from max_entries of the existing map, causing the
> following error:
>
> libbpf: couldn't reuse pinned map at '/sys/fs/bpf/m_logging': parameter mismatch
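
A minimal BPF-side sketch of a map definition that hits this path (the
includes follow the usual bpf_helpers.h conventions; the m_logging name is
taken from the pinned path in the error above and is only illustrative):

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  struct {
          __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
          __uint(key_size, sizeof(int));
          __uint(value_size, sizeof(int));
          __uint(pinning, LIBBPF_PIN_BY_NAME);
          /* max_entries left unset: libbpf sizes it to libbpf_num_possible_cpus() */
  } m_logging SEC(".maps");

Without the fix, the definition keeps max_entries == 0 while the already
pinned map was created with max_entries equal to the CPU count, so
map_is_reuse_compat() sees a mismatch and rejects the reuse.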
>
> Fix this by overwriting max_entries in the map definition. For this to
> work, we need to do this in bpf_object__create_maps, before calling
> bpf_object__reuse_map.
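
A hedged userspace sketch of the behaviour after the fix (the object path
"prog.bpf.o" and the error handling are illustrative, not part of the
patch): a second load of the same object now reuses the map pinned at
/sys/fs/bpf/m_logging instead of failing, because max_entries is resolved
in the map definition before bpf_object__reuse_map() runs.

  #include <stdio.h>
  #include <bpf/libbpf.h>

  int main(void)
  {
          struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);
          struct bpf_map *map;

          if (!obj || bpf_object__load(obj)) {
                  fprintf(stderr, "open/load failed\n");
                  return 1;
          }

          /* With the fix, this reports libbpf_num_possible_cpus() whether the
           * map was freshly created or reused from the pinned path.
           */
          map = bpf_object__find_map_by_name(obj, "m_logging");
          if (map)
                  printf("max_entries = %u\n", bpf_map__max_entries(map));

          bpf_object__close(obj);
          return 0;
  }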
>
> Fixes: 57a00f41644f ("libbpf: Add auto-pinning of maps when loading BPF objects")
> Signed-off-by: Stijn Tintel <stijn@...ux-ipv6.be>
Acked-by: Song Liu <songliubraving@...com>
> ---
> v2: overwrite max_entries in the map definition instead of adding an
> extra check in map_is_reuse_compat, and introduce a helper function
> for this as suggested by Song.
> ---
> tools/lib/bpf/libbpf.c | 44 ++++++++++++++++++++++++------------------
> 1 file changed, 25 insertions(+), 19 deletions(-)
>
> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> index 7f10dd501a52..133462637b09 100644
> --- a/tools/lib/bpf/libbpf.c
> +++ b/tools/lib/bpf/libbpf.c
> @@ -4854,7 +4854,6 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
> LIBBPF_OPTS(bpf_map_create_opts, create_attr);
> struct bpf_map_def *def = &map->def;
> const char *map_name = NULL;
> - __u32 max_entries;
> int err = 0;
>
> if (kernel_supports(obj, FEAT_PROG_NAME))
> @@ -4864,21 +4863,6 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
> create_attr.numa_node = map->numa_node;
> create_attr.map_extra = map->map_extra;
>
> - if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) {
> - int nr_cpus;
> -
> - nr_cpus = libbpf_num_possible_cpus();
> - if (nr_cpus < 0) {
> - pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
> - map->name, nr_cpus);
> - return nr_cpus;
> - }
> - pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
> - max_entries = nr_cpus;
> - } else {
> - max_entries = def->max_entries;
> - }
> -
> if (bpf_map__is_struct_ops(map))
> create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
>
> @@ -4928,7 +4912,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
>
> if (obj->gen_loader) {
> bpf_gen__map_create(obj->gen_loader, def->type, map_name,
> - def->key_size, def->value_size, max_entries,
> + def->key_size, def->value_size, def->max_entries,
> &create_attr, is_inner ? -1 : map - obj->maps);
> /* Pretend to have valid FD to pass various fd >= 0 checks.
> * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
> @@ -4937,7 +4921,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
> } else {
> map->fd = bpf_map_create(def->type, map_name,
> def->key_size, def->value_size,
> - max_entries, &create_attr);
> + def->max_entries, &create_attr);
> }
> if (map->fd < 0 && (create_attr.btf_key_type_id ||
> create_attr.btf_value_type_id)) {
> @@ -4954,7 +4938,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
> map->btf_value_type_id = 0;
> map->fd = bpf_map_create(def->type, map_name,
> def->key_size, def->value_size,
> - max_entries, &create_attr);
> + def->max_entries, &create_attr);
> }
>
> err = map->fd < 0 ? -errno : 0;
> @@ -5058,6 +5042,24 @@ static int bpf_object_init_prog_arrays(struct bpf_object *obj)
> return 0;
> }
>
> +static int map_set_def_max_entries(struct bpf_map *map)
> +{
> + if (map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !map->def.max_entries) {
> + int nr_cpus;
> +
> + nr_cpus = libbpf_num_possible_cpus();
> + if (nr_cpus < 0) {
> + pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
> + map->name, nr_cpus);
> + return nr_cpus;
> + }
> + pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
> + map->def.max_entries = nr_cpus;
> + }
> +
> + return 0;
> +}
> +
> static int
> bpf_object__create_maps(struct bpf_object *obj)
> {
> @@ -5090,6 +5092,10 @@ bpf_object__create_maps(struct bpf_object *obj)
> continue;
> }
>
> + err = map_set_def_max_entries(map);
> + if (err)
> + goto err_out;
> +
> retried = false;
> retry:
> if (map->pin_path) {
> --
> 2.34.1
>