Date:   Mon, 14 Sep 2020 16:28:34 -0700
From:   Andrii Nakryiko <andrii.nakryiko@...il.com>
To:     Stanislav Fomichev <sdf@...gle.com>
Cc:     Networking <netdev@...r.kernel.org>, bpf <bpf@...r.kernel.org>,
        "David S. Miller" <davem@...emloft.net>,
        Alexei Starovoitov <ast@...nel.org>,
        Daniel Borkmann <daniel@...earbox.net>,
        YiFei Zhu <zhuyifei1999@...il.com>
Subject: Re: [PATCH bpf-next v5 3/5] libbpf: Add BPF_PROG_BIND_MAP syscall and
 use it on .rodata section

On Mon, Sep 14, 2020 at 11:37 AM Stanislav Fomichev <sdf@...gle.com> wrote:
>
> From: YiFei Zhu <zhuyifei@...gle.com>
>
> The patch adds a simple wrapper, bpf_prog_bind_map(), around the
> syscall. When libbpf tries to load a program, it will probe the kernel
> for support of this syscall and unconditionally bind the .rodata
> section to the program.
>
> Cc: YiFei Zhu <zhuyifei1999@...il.com>
> Signed-off-by: YiFei Zhu <zhuyifei@...gle.com>
> Signed-off-by: Stanislav Fomichev <sdf@...gle.com>
> ---
>  tools/lib/bpf/bpf.c      | 16 +++++++++
>  tools/lib/bpf/bpf.h      |  8 +++++
>  tools/lib/bpf/libbpf.c   | 72 ++++++++++++++++++++++++++++++++++++++++
>  tools/lib/bpf/libbpf.map |  1 +
>  4 files changed, 97 insertions(+)
>
> diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
> index 82b983ff6569..2baa1308737c 100644
> --- a/tools/lib/bpf/bpf.c
> +++ b/tools/lib/bpf/bpf.c
> @@ -872,3 +872,19 @@ int bpf_enable_stats(enum bpf_stats_type type)
>
>         return sys_bpf(BPF_ENABLE_STATS, &attr, sizeof(attr));
>  }
> +
> +int bpf_prog_bind_map(int prog_fd, int map_fd,
> +                     const struct bpf_prog_bind_opts *opts)
> +{
> +       union bpf_attr attr;
> +
> +       if (!OPTS_VALID(opts, bpf_prog_bind_opts))
> +               return -EINVAL;
> +
> +       memset(&attr, 0, sizeof(attr));
> +       attr.prog_bind_map.prog_fd = prog_fd;
> +       attr.prog_bind_map.map_fd = map_fd;
> +       attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0);
> +
> +       return sys_bpf(BPF_PROG_BIND_MAP, &attr, sizeof(attr));
> +}
> diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
> index 015d13f25fcc..8c1ac4b42f90 100644
> --- a/tools/lib/bpf/bpf.h
> +++ b/tools/lib/bpf/bpf.h
> @@ -243,6 +243,14 @@ LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
>  enum bpf_stats_type; /* defined in up-to-date linux/bpf.h */
>  LIBBPF_API int bpf_enable_stats(enum bpf_stats_type type);
>
> +struct bpf_prog_bind_opts {
> +       size_t sz; /* size of this struct for forward/backward compatibility */
> +       __u32 flags;
> +};
> +#define bpf_prog_bind_opts__last_field flags
> +
> +LIBBPF_API int bpf_prog_bind_map(int prog_fd, int map_fd,
> +                                const struct bpf_prog_bind_opts *opts);
>  #ifdef __cplusplus
>  } /* extern "C" */
>  #endif
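
Not part of the patch, but for anyone following along, a caller of the
new API would look roughly like this (hypothetical sketch, untested;
the helper name bind_extra_map is made up, and DECLARE_LIBBPF_OPTS
takes care of setting .sz):

	#include <errno.h>
	#include <stdio.h>
	#include <bpf/bpf.h>

	/* Ask the kernel to keep map_fd alive for as long as prog_fd is,
	 * via the new BPF_PROG_BIND_MAP wrapper. prog_fd/map_fd are
	 * assumed to come from elsewhere (e.g. a loaded bpf_object).
	 */
	static int bind_extra_map(int prog_fd, int map_fd)
	{
		DECLARE_LIBBPF_OPTS(bpf_prog_bind_opts, opts, .flags = 0);
		int err;

		err = bpf_prog_bind_map(prog_fd, map_fd, &opts);
		if (err) /* -1 with errno set, or -EINVAL for malformed opts */
			fprintf(stderr, "BPF_PROG_BIND_MAP failed: %d (errno %d)\n",
				err, errno);
		return err;
	}
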
> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> index 550950eb1860..b68fa08e2fa9 100644
> --- a/tools/lib/bpf/libbpf.c
> +++ b/tools/lib/bpf/libbpf.c
> @@ -174,6 +174,8 @@ enum kern_feature_id {
>         FEAT_EXP_ATTACH_TYPE,
>         /* bpf_probe_read_{kernel,user}[_str] helpers */
>         FEAT_PROBE_READ_KERN,
> +       /* BPF_PROG_BIND_MAP is supported */
> +       FEAT_PROG_BIND_MAP,
>         __FEAT_CNT,
>  };
>
> @@ -409,6 +411,7 @@ struct bpf_object {
>         struct extern_desc *externs;
>         int nr_extern;
>         int kconfig_map_idx;
> +       int rodata_map_idx;
>
>         bool loaded;
>         bool has_subcalls;
> @@ -1070,6 +1073,7 @@ static struct bpf_object *bpf_object__new(const char *path,
>         obj->efile.bss_shndx = -1;
>         obj->efile.st_ops_shndx = -1;
>         obj->kconfig_map_idx = -1;
> +       obj->rodata_map_idx = -1;
>
>         obj->kern_version = get_kernel_version();
>         obj->loaded = false;
> @@ -1428,6 +1432,8 @@ static int bpf_object__init_global_data_maps(struct bpf_object *obj)
>                                                     obj->efile.rodata->d_size);
>                 if (err)
>                         return err;
> +
> +               obj->rodata_map_idx = obj->nr_maps - 1;
>         }
>         if (obj->efile.bss_shndx >= 0) {
>                 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
> @@ -3894,6 +3900,55 @@ static int probe_kern_probe_read_kernel(void)
>         return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
>  }
>
> +static int probe_prog_bind_map(void)
> +{
> +       struct bpf_load_program_attr prg_attr;
> +       struct bpf_create_map_attr map_attr;
> +       char *cp, errmsg[STRERR_BUFSIZE];
> +       struct bpf_insn insns[] = {
> +               BPF_MOV64_IMM(BPF_REG_0, 0),
> +               BPF_EXIT_INSN(),
> +       };
> +       int ret, map, prog;
> +
> +       if (!kernel_supports(FEAT_GLOBAL_DATA))
> +               return 0;

TBH, I don't think this check is needed, and it's actually coupling
two independent features together. probe_prog_bind_map() probes
PROG_BIND_MAP; it has nothing to do with global data itself. It's all
cached now, so there is no problem with that; it just feels unclean.
If someone is using .rodata and the kernel doesn't support global
data, we'll fail way sooner. On the other hand, if there is ever
another use case where PROG_BIND_MAP is needed for something else, why
would we care about global data support? I know that in the real world
it will be hard to find a kernel with PROG_BIND_MAP but no global data
support, since the latter is so much older, but it's still unnecessary
coupling.

Would be nice to follow up and remove this, thanks.
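
To illustrate the point: PROG_BIND_MAP can be probed completely on its
own, e.g. with nothing but the existing low-level bpf_create_map() /
bpf_load_program() helpers and no global data anywhere (rough sketch,
untested, and not the libbpf-internal probe itself; the function name
is made up):

	#include <stdbool.h>
	#include <unistd.h>
	#include <linux/bpf.h>
	#include <bpf/bpf.h>

	/* Load a trivial "return 0" socket filter, create a dummy array
	 * map, and check whether BPF_PROG_BIND_MAP accepts the pair.
	 */
	static bool probe_prog_bind_map_standalone(void)
	{
		struct bpf_insn insns[] = {
			/* r0 = 0 */
			{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },
			/* exit */
			{ .code = BPF_JMP | BPF_EXIT },
		};
		int map, prog, ret = -1;

		map = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int), 32, 1, 0);
		if (map < 0)
			return false;

		prog = bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, insns,
					sizeof(insns) / sizeof(insns[0]), "GPL",
					0, NULL, 0);
		if (prog >= 0) {
			ret = bpf_prog_bind_map(prog, map, NULL);
			close(prog);
		}
		close(map);

		return ret >= 0;
	}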

> +
> +       memset(&map_attr, 0, sizeof(map_attr));
> +       map_attr.map_type = BPF_MAP_TYPE_ARRAY;
> +       map_attr.key_size = sizeof(int);
> +       map_attr.value_size = 32;
> +       map_attr.max_entries = 1;
> +

[...]
