Message-ID: <CAEf4BzYRTqeanOHddp-9GLwNwma4D=8LUnOYkRdD5Kv=TB+ZpQ@mail.gmail.com>
Date:   Tue, 5 Feb 2019 14:34:56 -0800
From:   Andrii Nakryiko <andrii.nakryiko@...il.com>
To:     Yonghong Song <yhs@...com>
Cc:     Andrii Nakryiko <andriin@...com>, Song Liu <songliubraving@...com>,
        Alexei Starovoitov <ast@...com>, Martin Lau <kafai@...com>,
        "netdev@...r.kernel.org" <netdev@...r.kernel.org>,
        "daniel@...earbox.net" <daniel@...earbox.net>
Subject: Re: [PATCH bpf-next 1/2] btf: separate btf creation and loading

On Tue, Feb 5, 2019 at 1:25 PM Yonghong Song <yhs@...com> wrote:
>
>
>
> On 2/5/19 11:48 AM, Andrii Nakryiko wrote:
> > This change splits the previous btf__new() functionality of
> > constructing struct btf and loading it into the kernel into two steps:
> > - btf__new() just creates and initializes struct btf
> > - btf__load() attempts to load an existing struct btf into the kernel
> >
> > btf__free() will still close the BTF fd, if it was ever successfully
> > loaded into the kernel.
> >
> > This change allows users of libbpf to manipulate BTF through its API
> > without unnecessarily loading it into the kernel.
> >
> > One of the intended use cases is pahole using libbpf to do DWARF-to-BTF
> > conversion and deduplication, while handling ELF section overwrites and
> > other concerns on its own.
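
To make the intended flow concrete for such users, here is a rough
(untested) sketch, with made-up variable names, and assuming btf__load()
ends up returning just an error code as discussed further down:

    /* raw_btf_data/raw_btf_size are placeholders for the raw .BTF bytes,
     * e.g. read out of an ELF section; IS_ERR/PTR_ERR as used in libbpf.c */
    struct btf *btf = btf__new(raw_btf_data, raw_btf_size);
    int err;

    if (IS_ERR(btf))
            return PTR_ERR(btf);

    /* inspect, dedup, or otherwise manipulate BTF purely in user space */

    err = btf__load(btf);   /* optional: only if the kernel needs this BTF */
    if (err)
            fprintf(stderr, "failed to load BTF into kernel: %d\n", err);

    btf__free(btf);         /* also closes the BTF fd, if it was loaded */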
> >
> > Signed-off-by: Andrii Nakryiko <andriin@...com>
> > Acked-by: Song Liu <songliubraving@...com>
> > ---
> >   tools/lib/bpf/btf.c      | 53 ++++++++++++++++++++++------------------
> >   tools/lib/bpf/btf.h      |  1 +
> >   tools/lib/bpf/libbpf.c   |  2 +-
> >   tools/lib/bpf/libbpf.map |  1 +
> >   4 files changed, 32 insertions(+), 25 deletions(-)
> >
> > diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
> > index 4949f8840bda..065d51fa63e5 100644
> > --- a/tools/lib/bpf/btf.c
> > +++ b/tools/lib/bpf/btf.c
> > @@ -366,8 +366,6 @@ void btf__free(struct btf *btf)
> >
> >   struct btf *btf__new(__u8 *data, __u32 size)
> >   {
> > -     __u32 log_buf_size = 0;
> > -     char *log_buf = NULL;
> >       struct btf *btf;
> >       int err;
> >
> > @@ -377,15 +375,6 @@ struct btf *btf__new(__u8 *data, __u32 size)
> >
> >       btf->fd = -1;
> >
> > -     log_buf = malloc(BPF_LOG_BUF_SIZE);
> > -     if (!log_buf) {
> > -             err = -ENOMEM;
> > -             goto done;
> > -     }
> > -
> > -     *log_buf = 0;
> > -     log_buf_size = BPF_LOG_BUF_SIZE;
> > -
> >       btf->data = malloc(size);
> >       if (!btf->data) {
> >               err = -ENOMEM;
> > @@ -395,17 +384,6 @@ struct btf *btf__new(__u8 *data, __u32 size)
> >       memcpy(btf->data, data, size);
> >       btf->data_size = size;
> >
> > -     btf->fd = bpf_load_btf(btf->data, btf->data_size,
> > -                            log_buf, log_buf_size, false);
> > -
> > -     if (btf->fd == -1) {
> > -             err = -errno;
> > -             pr_warning("Error loading BTF: %s(%d)\n", strerror(errno), errno);
> > -             if (log_buf && *log_buf)
> > -                     pr_warning("%s\n", log_buf);
> > -             goto done;
> > -     }
> > -
> >       err = btf_parse_hdr(btf);
> >       if (err)
> >               goto done;
> > @@ -417,8 +395,6 @@ struct btf *btf__new(__u8 *data, __u32 size)
> >       err = btf_parse_type_sec(btf);
> >
> >   done:
> > -     free(log_buf);
> > -
> >       if (err) {
> >               btf__free(btf);
> >               return ERR_PTR(err);
> > @@ -427,6 +403,35 @@ struct btf *btf__new(__u8 *data, __u32 size)
> >       return btf;
> >   }
> >
> > +int btf__load(struct btf* btf) {
> > +     __u32 log_buf_size = BPF_LOG_BUF_SIZE;
> > +     char *log_buf = NULL;
> > +
> > +     if (btf->fd >= 0) {
> > +             return -EEXIST;
> > +     }
> > +
> > +     log_buf = malloc(log_buf_size);
> > +     if (!log_buf)
> > +             return -ENOMEM;
> > +
> > +     *log_buf = 0;
> > +
> > +     btf->fd = bpf_load_btf(btf->data, btf->data_size,
> > +                            log_buf, log_buf_size, false);
> > +     if (btf->fd < 0) {
> > +             btf->fd = -errno;
>
> Why do you set btf->fd = -errno? Do you have any intended use for it
> later? If not, I would still prefer the existing value of -1, which would
> be consistent with the fd field convention used elsewhere in libbpf.

I thought it would be convenient to get the newly created fd back from
btf__load(), but I don't have strong preferences. In any case, we have
btf__fd() to get it back, so I will change btf__load() to return just an
error code.
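
The updated btf__load() would then look roughly like this (untested
sketch, keeping fd at -1 on failure as you suggest):

int btf__load(struct btf *btf)
{
        __u32 log_buf_size = BPF_LOG_BUF_SIZE;
        char *log_buf = NULL;
        int err = 0;

        if (btf->fd >= 0)
                return -EEXIST;

        log_buf = malloc(log_buf_size);
        if (!log_buf)
                return -ENOMEM;

        *log_buf = 0;

        /* on failure bpf_load_btf() returns -1 with errno set, so btf->fd
         * keeps the usual "not loaded" value of -1 */
        btf->fd = bpf_load_btf(btf->data, btf->data_size,
                               log_buf, log_buf_size, false);
        if (btf->fd < 0) {
                err = -errno;
                pr_warning("Error loading BTF: %s(%d)\n", strerror(errno), errno);
                if (*log_buf)
                        pr_warning("%s\n", log_buf);
        }

        free(log_buf);
        return err;
}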

>
> > +             pr_warning("Error loading BTF: %s(%d)\n", strerror(errno), errno);
> > +             if (*log_buf)
> > +                     pr_warning("%s\n", log_buf);
> > +             goto done;
> > +     }
> > +
> > +done:
> > +     free(log_buf);
> > +     return btf->fd;
> > +}
> > +
> >   int btf__fd(const struct btf *btf)
> >   {
> >       return btf->fd;
> > diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
> > index 25a9d2db035d..e8410887f93a 100644
> > --- a/tools/lib/bpf/btf.h
> > +++ b/tools/lib/bpf/btf.h
> > @@ -57,6 +57,7 @@ struct btf_ext_header {
> >
> >   LIBBPF_API void btf__free(struct btf *btf);
> >   LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size);
> > +LIBBPF_API int btf__load(struct btf* btf);
> >   LIBBPF_API __s32 btf__find_by_name(const struct btf *btf,
> >                                  const char *type_name);
> >   LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf);
> > diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> > index 47969aa0faf8..75b82c1cfc72 100644
> > --- a/tools/lib/bpf/libbpf.c
> > +++ b/tools/lib/bpf/libbpf.c
> > @@ -835,7 +835,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
> >                       obj->efile.maps_shndx = idx;
> >               else if (strcmp(name, BTF_ELF_SEC) == 0) {
> >                       obj->btf = btf__new(data->d_buf, data->d_size);
> > -                     if (IS_ERR(obj->btf)) {
> > +                     if (IS_ERR(obj->btf) || btf__load(obj->btf) < 0) {
> >                               pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
> >                                          BTF_ELF_SEC, PTR_ERR(obj->btf));
> >                               obj->btf = NULL;
> > diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
> > index 89c1149e32ee..ffa1fe044f6a 100644
> > --- a/tools/lib/bpf/libbpf.map
> > +++ b/tools/lib/bpf/libbpf.map
> > @@ -134,6 +134,7 @@ LIBBPF_0.0.2 {
> >               bpf_object__find_map_fd_by_name;
> >               bpf_get_link_xdp_id;
> >               btf__dedup;
> > +             btf__load;
>
> Maybe put btf__load after btf__get_strings to keep the list in
> alphabetical order.
> With the above changes,

Yep, will fix to be alphabetically sorted.
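
I.e. the btf__ part of the LIBBPF_0.0.2 block in libbpf.map would then
read (fragment only):

                btf__dedup;
                btf__get_map_kv_tids;
                btf__get_nr_types;
                btf__get_strings;
                btf__load;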

>
>
> >               btf__get_map_kv_tids;
> >               btf__get_nr_types;
> >               btf__get_strings;
> >
