Date:   Tue, 14 Jan 2020 17:49:00 -0800
From:   Andrii Nakryiko <andrii.nakryiko@...il.com>
To:     Martin KaFai Lau <kafai@...com>
Cc:     bpf <bpf@...r.kernel.org>, Alexei Starovoitov <ast@...nel.org>,
        Daniel Borkmann <daniel@...earbox.net>,
        David Miller <davem@...emloft.net>,
        Kernel Team <kernel-team@...com>,
        Networking <netdev@...r.kernel.org>
Subject: Re: [PATCH bpf-next 5/5] bpftool: Support dumping a map with btf_vmlinux_value_type_id

On Tue, Jan 14, 2020 at 2:46 PM Martin KaFai Lau <kafai@...com> wrote:
>
> This patch makes bpftool support dumping a map's value properly
> when the map's value type is a type from the running kernel's BTF
> (i.e. map_info.btf_vmlinux_value_type_id is set instead of
> map_info.btf_value_type_id).  The first use case is
> BPF_MAP_TYPE_STRUCT_OPS.
>
> Signed-off-by: Martin KaFai Lau <kafai@...com>
> ---
>  tools/bpf/bpftool/map.c | 43 +++++++++++++++++++++++++++++++----------
>  1 file changed, 33 insertions(+), 10 deletions(-)
>
> diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
> index 4c5b15d736b6..d25f3b2355ad 100644
> --- a/tools/bpf/bpftool/map.c
> +++ b/tools/bpf/bpftool/map.c
> @@ -20,6 +20,7 @@
>  #include "btf.h"
>  #include "json_writer.h"
>  #include "main.h"
> +#include "libbpf_internal.h"
>
>  const char * const map_type_name[] = {
>         [BPF_MAP_TYPE_UNSPEC]                   = "unspec",
> @@ -252,6 +253,7 @@ static int do_dump_btf(const struct btf_dumper *d,
>                        struct bpf_map_info *map_info, void *key,
>                        void *value)
>  {
> +       __u32 value_id;
>         int ret;
>
>         /* start of key-value pair */
> @@ -265,9 +267,12 @@ static int do_dump_btf(const struct btf_dumper *d,
>                         goto err_end_obj;
>         }
>
> +       value_id = map_info->btf_vmlinux_value_type_id ?
> +               : map_info->btf_value_type_id;
> +
>         if (!map_is_per_cpu(map_info->type)) {
>                 jsonw_name(d->jw, "value");
> -               ret = btf_dumper_type(d, map_info->btf_value_type_id, value);
> +               ret = btf_dumper_type(d, value_id, value);
>         } else {
>                 unsigned int i, n, step;
>
> @@ -279,8 +284,7 @@ static int do_dump_btf(const struct btf_dumper *d,
>                         jsonw_start_object(d->jw);
>                         jsonw_int_field(d->jw, "cpu", i);
>                         jsonw_name(d->jw, "value");
> -                       ret = btf_dumper_type(d, map_info->btf_value_type_id,
> -                                             value + i * step);
> +                       ret = btf_dumper_type(d, value_id, value + i * step);
>                         jsonw_end_object(d->jw);
>                         if (ret)
>                                 break;
> @@ -932,6 +936,27 @@ static int maps_have_btf(int *fds, int nb_fds)
>         return 1;
>  }
>
> +static struct btf *get_map_kv_btf(const struct bpf_map_info *info)
> +{
> +       struct btf *btf = NULL;
> +
> +       if (info->btf_vmlinux_value_type_id) {
> +               btf = bpf_find_kernel_btf();

If we are dumping multiple maps, re-reading and re-parsing the kernel
BTF for each of them could get quite costly. Can we load it lazily,
when first required, and cache it instead?
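
Something like this untested sketch is what I have in mind (the
get_kernel_btf_cached() name is made up; it relies on the same
libbpf_internal.h helpers this patch already includes):

static struct btf *kernel_btf;	/* parsed vmlinux BTF, reused across maps */

static struct btf *get_kernel_btf_cached(void)
{
	struct btf *btf;

	if (kernel_btf)
		return kernel_btf;

	btf = bpf_find_kernel_btf();
	if (!IS_ERR(btf))
		kernel_btf = btf;	/* cache only on success */

	return btf;
}

get_map_kv_btf() could then call that instead of bpf_find_kernel_btf()
directly; the caller would just need to skip btf__free() for the cached
copy and free it once on exit.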

> +               if (IS_ERR(btf))
> +                       p_err("failed to get kernel btf");
> +       } else if (info->btf_value_type_id) {
> +               int err;
> +
> +               err = btf__get_from_id(info->btf_id, &btf);
> +               if (err || !btf) {
> +                       p_err("failed to get btf");
> +                       btf = err ? ERR_PTR(err) : ERR_PTR(-ESRCH);
> +               }
> +       }
> +
> +       return btf;
> +}
> +
>  static int
>  map_dump(int fd, struct bpf_map_info *info, json_writer_t *wtr,
>          bool show_header)
> @@ -952,13 +977,11 @@ map_dump(int fd, struct bpf_map_info *info, json_writer_t *wtr,
>         prev_key = NULL;
>
>         if (wtr) {
> -               if (info->btf_id) {
> -                       err = btf__get_from_id(info->btf_id, &btf);
> -                       if (err || !btf) {
> -                               err = err ? : -ESRCH;
> -                               p_err("failed to get btf");
> -                               goto exit_free;
> -                       }
> +               btf = get_map_kv_btf(info);
> +               if (IS_ERR(btf)) {
> +                       err = PTR_ERR(btf);
> +                       btf = NULL;
> +                       goto exit_free;
>                 }
>
>                 if (show_header) {
> --
> 2.17.1
>
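
For reference, with this applied a struct_ops map dump should then just
be the usual command (map id below is hypothetical):

  # bpftool map dump id 42

with the value formatted using the vmlinux BTF type.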
