Message-ID: <CAEf4BzZdvBKp6WO+zUTF0F9iL2WaukvTWNGZggUPx-nwESpQ7w@mail.gmail.com>
Date:   Wed, 15 Jul 2020 23:04:58 -0700
From:   Andrii Nakryiko <andrii.nakryiko@...il.com>
To:     Song Liu <songliubraving@...com>
Cc:     open list <linux-kernel@...r.kernel.org>,
        bpf <bpf@...r.kernel.org>, Networking <netdev@...r.kernel.org>,
        Alexei Starovoitov <ast@...nel.org>,
        Daniel Borkmann <daniel@...earbox.net>,
        Kernel Team <kernel-team@...com>,
        john fastabend <john.fastabend@...il.com>,
        KP Singh <kpsingh@...omium.org>,
        Jesper Dangaard Brouer <brouer@...hat.com>,
        Peter Zijlstra <peterz@...radead.org>
Subject: Re: [PATCH v2 bpf-next 2/2] selftests/bpf: add callchain_stackid

On Tue, Jul 14, 2020 at 11:09 PM Song Liu <songliubraving@...com> wrote:
>
> This tests the new helper functions bpf_get_stackid_pe and bpf_get_stack_pe.
> These two helpers have a different implementation for perf_events with PEBS
> entries.
>
> Signed-off-by: Song Liu <songliubraving@...com>
> ---
>  .../bpf/prog_tests/perf_event_stackmap.c      | 120 ++++++++++++++++++
>  .../selftests/bpf/progs/perf_event_stackmap.c |  64 ++++++++++
>  2 files changed, 184 insertions(+)
>  create mode 100644 tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c
>  create mode 100644 tools/testing/selftests/bpf/progs/perf_event_stackmap.c
>

Just a few simplification suggestions, but overall this looks good, so please add:

Acked-by: Andrii Nakryiko <andriin@...com>

[...]

> +
> +void test_perf_event_stackmap(void)
> +{
> +       struct perf_event_attr attr = {
> +               /* .type = PERF_TYPE_SOFTWARE, */
> +               .type = PERF_TYPE_HARDWARE,
> +               .config = PERF_COUNT_HW_CPU_CYCLES,
> +               .precise_ip = 2,
> +               .sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK |
> +                       PERF_SAMPLE_CALLCHAIN,
> +               .branch_sample_type = PERF_SAMPLE_BRANCH_USER |
> +                       PERF_SAMPLE_BRANCH_NO_FLAGS |
> +                       PERF_SAMPLE_BRANCH_NO_CYCLES |
> +                       PERF_SAMPLE_BRANCH_CALL_STACK,
> +               .sample_period = 5000,
> +               .size = sizeof(struct perf_event_attr),
> +       };
> +       struct perf_event_stackmap *skel;
> +       __u32 duration = 0;
> +       cpu_set_t cpu_set;
> +       int pmu_fd, err;
> +
> +       skel = perf_event_stackmap__open();
> +
> +       if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
> +               return;
> +
> +       /* override program type */
> +       bpf_program__set_perf_event(skel->progs.oncpu);

This should be unnecessary; didn't libbpf detect the program type correctly
from the SEC("perf_event") annotation? If not, let's fix that.
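
As a rough sketch (assuming the SEC("perf_event") annotation is what drives
type detection at open time), I'd expect this to be enough:

    skel = perf_event_stackmap__open();
    if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
            return;

    /* no bpf_program__set_perf_event() override here; libbpf should have
     * already set BPF_PROG_TYPE_PERF_EVENT based on SEC("perf_event")
     */
    err = perf_event_stackmap__load(skel);
    if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err))
            goto cleanup;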

> +
> +       err = perf_event_stackmap__load(skel);
> +       if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err))
> +               goto cleanup;
> +
> +       CPU_ZERO(&cpu_set);
> +       CPU_SET(0, &cpu_set);
> +       err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
> +       if (CHECK(err, "set_affinity", "err %d, errno %d\n", err, errno))
> +               goto cleanup;
> +
> +       pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
> +                        0 /* cpu 0 */, -1 /* group id */,
> +                        0 /* flags */);
> +       if (pmu_fd < 0) {
> +               printf("%s:SKIP:cpu doesn't support the event\n", __func__);
> +               test__skip();
> +               goto cleanup;
> +       }
> +
> +       skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
> +                                                          pmu_fd);
> +       if (CHECK(IS_ERR(skel->links.oncpu), "attach_perf_event",
> +                 "err %ld\n", PTR_ERR(skel->links.oncpu))) {
> +               close(pmu_fd);
> +               goto cleanup;
> +       }
> +
> +       /* create kernel and user stack traces for testing */
> +       func_6();
> +
> +       CHECK(skel->data->stackid_kernel != 2, "get_stackid_kernel", "failed\n");
> +       CHECK(skel->data->stackid_user != 2, "get_stackid_user", "failed\n");
> +       CHECK(skel->data->stack_kernel != 2, "get_stack_kernel", "failed\n");
> +       CHECK(skel->data->stack_user != 2, "get_stack_user", "failed\n");
> +       close(pmu_fd);

I think pmu_fd will be closed by perf_event_stackmap__destroy() (through
closing the link), so this explicit close() shouldn't be needed.
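
I.e., something like this at the end of the test (sketch only):

    /* create kernel and user stack traces for testing */
    func_6();

    CHECK(skel->data->stackid_kernel != 2, "get_stackid_kernel", "failed\n");
    CHECK(skel->data->stackid_user != 2, "get_stackid_user", "failed\n");
    CHECK(skel->data->stack_kernel != 2, "get_stack_kernel", "failed\n");
    CHECK(skel->data->stack_user != 2, "get_stack_user", "failed\n");

cleanup:
    /* destroys the link, which also closes the perf event fd */
    perf_event_stackmap__destroy(skel);

The close(pmu_fd) on the attach-error path above is still needed, though,
since no link owns the fd at that point.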

> +
> +cleanup:
> +       perf_event_stackmap__destroy(skel);
> +}
> diff --git a/tools/testing/selftests/bpf/progs/perf_event_stackmap.c b/tools/testing/selftests/bpf/progs/perf_event_stackmap.c
> new file mode 100644
> index 0000000000000..1b0457efeedec
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/progs/perf_event_stackmap.c
> @@ -0,0 +1,64 @@
> +// SPDX-License-Identifier: GPL-2.0
> +// Copyright (c) 2020 Facebook
> +#include "vmlinux.h"
> +#include <bpf/bpf_helpers.h>
> +
> +#ifndef PERF_MAX_STACK_DEPTH
> +#define PERF_MAX_STACK_DEPTH         127
> +#endif
> +
> +#ifndef BPF_F_USER_STACK
> +#define BPF_F_USER_STACK               (1ULL << 8)
> +#endif

BPF_F_USER_STACK should be in vmlinux.h already, similarly to BPF_F_CURRENT_CPU
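
I.e. (assuming the selftests' vmlinux.h is generated from a kernel that has
the UAPI flag enums, which it should be), the BPF_F_USER_STACK guard can be
dropped entirely; the PERF_MAX_STACK_DEPTH one still has to stay, since that
one is a #define rather than an enum and therefore isn't in BTF:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    #ifndef PERF_MAX_STACK_DEPTH
    #define PERF_MAX_STACK_DEPTH         127
    #endif

    /* BPF_F_USER_STACK comes from the anonymous enum in vmlinux.h */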

> +
> +typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH];
> +struct {
> +       __uint(type, BPF_MAP_TYPE_STACK_TRACE);
> +       __uint(max_entries, 16384);
> +       __uint(key_size, sizeof(__u32));
> +       __uint(value_size, sizeof(stack_trace_t));
> +} stackmap SEC(".maps");
> +
> +struct {
> +       __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
> +       __uint(max_entries, 1);
> +       __type(key, __u32);
> +       __type(value, stack_trace_t);
> +} stackdata_map SEC(".maps");
> +
> +long stackid_kernel = 1;
> +long stackid_user = 1;
> +long stack_kernel = 1;
> +long stack_user = 1;
> +

nit: it's kind of unusual to go from 1 -> 2; why not zero to one as a flag?
Those variables would then be available through skel->bss, btw.
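
E.g. (sketch, reusing your names), on the BPF side:

    long stackid_kernel = 0;
    long stackid_user = 0;
    long stack_kernel = 0;
    long stack_user = 0;

    ...
    if (val > 0)
            stackid_kernel = 1;

and on the user-space side (zero-initialized globals end up in .bss, hence
skel->bss instead of skel->data):

    CHECK(skel->bss->stackid_kernel != 1, "get_stackid_kernel", "failed\n");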

> +SEC("perf_event")
> +int oncpu(void *ctx)
> +{
> +       int max_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
> +       stack_trace_t *trace;
> +       __u32 key = 0;
> +       long val;
> +
> +       val = bpf_get_stackid(ctx, &stackmap, 0);
> +       if (val > 0)
> +               stackid_kernel = 2;
> +       val = bpf_get_stackid(ctx, &stackmap, BPF_F_USER_STACK);
> +       if (val > 0)
> +               stackid_user = 2;
> +
> +       trace = bpf_map_lookup_elem(&stackdata_map, &key);
> +       if (!trace)
> +               return 0;
> +
> +       val = bpf_get_stack(ctx, trace, max_len, 0);

Given that you don't care about the contents of trace, you could have used a
`stack_trace_t trace = {}` global variable instead of the PERCPU_ARRAY.
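
E.g., keeping just the bpf_get_stack() part (sketch only, not tested):

    /* scratch buffer; only the return value of bpf_get_stack() is checked */
    stack_trace_t trace = {};

    SEC("perf_event")
    int oncpu(void *ctx)
    {
            long val;

            val = bpf_get_stack(ctx, trace, sizeof(trace), 0);
            if (val > 0)
                    stack_kernel = 2;

            val = bpf_get_stack(ctx, trace, sizeof(trace), BPF_F_USER_STACK);
            if (val > 0)
                    stack_user = 2;

            return 0;
    }

That also drops the PERCPU_ARRAY map, the bpf_map_lookup_elem() call and the
NULL check (and takes care of the max_len nit below).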

> +       if (val > 0)
> +               stack_kernel = 2;
> +
> +       val = bpf_get_stack(ctx, trace, max_len, BPF_F_USER_STACK);

nit: max_len == sizeof(stack_trace_t) ?

> +       if (val > 0)
> +               stack_user = 2;
> +
> +       return 0;
> +}
> +
> +char LICENSE[] SEC("license") = "GPL";
> --
> 2.24.1
>
