lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Tue, 10 May 2022 16:45:01 -0700
From:   Andrii Nakryiko <andrii.nakryiko@...il.com>
To:     Jiri Olsa <jolsa@...nel.org>
Cc:     Arnaldo Carvalho de Melo <acme@...nel.org>,
        Alexei Starovoitov <ast@...nel.org>,
        Daniel Borkmann <daniel@...earbox.net>,
        Andrii Nakryiko <andrii@...nel.org>,
        "linux-perf-use." <linux-perf-users@...r.kernel.org>,
        Networking <netdev@...r.kernel.org>, bpf <bpf@...r.kernel.org>,
        Ingo Molnar <mingo@...nel.org>,
        Namhyung Kim <namhyung@...nel.org>,
        Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
        Peter Zijlstra <a.p.zijlstra@...llo.nl>,
        Martin KaFai Lau <kafai@...com>,
        Song Liu <songliubraving@...com>, Yonghong Song <yhs@...com>,
        John Fastabend <john.fastabend@...il.com>,
        Ian Rogers <irogers@...gle.com>
Subject: Re: [PATCHv2 perf/core 2/3] perf tools: Register fallback libbpf
 section handler

On Tue, May 10, 2022 at 12:47 AM Jiri Olsa <jolsa@...nel.org> wrote:
>
> Perf is using section name to declare special kprobe arguments,
> which no longer works with current libbpf, that either requires
> certain form of the section name or allows to register custom
> handler.
>
> Adding perf support to register 'fallback' section handler to take
> care of perf kprobe programs. The fallback means that it handles
> any section definition besides the ones that libbpf handles.
>
> The handler serves two purposes:
>   - allows perf programs to have special arguments in section name
>   - allows perf to use pre-load callback where we can attach init
>     code (zeroing all argument registers) to each perf program
>
> The second is an essential part of the new prologue generation code
> that's coming in the following patch.
>
> Signed-off-by: Jiri Olsa <jolsa@...nel.org>
> ---
>  tools/perf/util/bpf-loader.c | 47 ++++++++++++++++++++++++++++++++++++
>  1 file changed, 47 insertions(+)
>
> diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
> index f8ad581ea247..2a2c9512c4e8 100644
> --- a/tools/perf/util/bpf-loader.c
> +++ b/tools/perf/util/bpf-loader.c
> @@ -86,6 +86,7 @@ bpf_perf_object__next(struct bpf_perf_object *prev)
>              (perf_obj) = (tmp), (tmp) = bpf_perf_object__next(tmp))
>
>  static bool libbpf_initialized;
> +static int libbpf_sec_handler;
>
>  static int bpf_perf_object__add(struct bpf_object *obj)
>  {
> @@ -99,12 +100,58 @@ static int bpf_perf_object__add(struct bpf_object *obj)
>         return perf_obj ? 0 : -ENOMEM;
>  }
>
> +static struct bpf_insn prologue_init_insn[] = {
> +       BPF_MOV64_IMM(BPF_REG_0, 0),
> +       BPF_MOV64_IMM(BPF_REG_1, 0),

R0 should be initialized before exit anyway, and R1 contains the
context, so it doesn't need initialization — I think you only need R2-R5?

> +       BPF_MOV64_IMM(BPF_REG_2, 0),
> +       BPF_MOV64_IMM(BPF_REG_3, 0),
> +       BPF_MOV64_IMM(BPF_REG_4, 0),
> +       BPF_MOV64_IMM(BPF_REG_5, 0),
> +};
> +
> +static int libbpf_prog_prepare_load_fn(struct bpf_program *prog,
> +                                      struct bpf_prog_load_opts *opts __maybe_unused,
> +                                      long cookie __maybe_unused)
> +{
> +       size_t init_size_cnt = ARRAY_SIZE(prologue_init_insn);
> +       size_t orig_insn_cnt, insn_cnt, init_size, orig_size;
> +       const struct bpf_insn *orig_insn;
> +       struct bpf_insn *insn;
> +
> +       /* prepend initialization code to program instructions */
> +       orig_insn = bpf_program__insns(prog);
> +       orig_insn_cnt = bpf_program__insn_cnt(prog);
> +       init_size = init_size_cnt * sizeof(*insn);
> +       orig_size = orig_insn_cnt * sizeof(*insn);
> +
> +       insn_cnt = orig_insn_cnt + init_size_cnt;
> +       insn = malloc(insn_cnt * sizeof(*insn));
> +       if (!insn)
> +               return -ENOMEM;
> +
> +       memcpy(insn, prologue_init_insn, init_size);
> +       memcpy((char *) insn + init_size, orig_insn, orig_size);
> +       bpf_program__set_insns(prog, insn, insn_cnt);
> +       return 0;
> +}
> +
>  static int libbpf_init(void)
>  {
> +       LIBBPF_OPTS(libbpf_prog_handler_opts, handler_opts,
> +               .prog_prepare_load_fn = libbpf_prog_prepare_load_fn,
> +       );
> +
>         if (libbpf_initialized)
>                 return 0;
>
>         libbpf_set_print(libbpf_perf_print);
> +       libbpf_sec_handler = libbpf_register_prog_handler(NULL, BPF_PROG_TYPE_KPROBE,
> +                                                         0, &handler_opts);
> +       if (libbpf_sec_handler < 0) {
> +               pr_debug("bpf: failed to register libbpf section handler: %d\n",
> +                        libbpf_sec_handler);
> +               return -BPF_LOADER_ERRNO__INTERNAL;
> +       }
>         libbpf_initialized = true;
>         return 0;
>  }
> --
> 2.35.3
>

Powered by blists - more mailing lists