Date:   Thu, 10 Mar 2022 15:21:17 -0800
From:   Yonghong Song <yhs@...com>
To:     Namhyung Kim <namhyung@...nel.org>,
        Alexei Starovoitov <ast@...nel.org>,
        Daniel Borkmann <daniel@...earbox.net>,
        Andrii Nakryiko <andrii@...nel.org>
Cc:     Martin KaFai Lau <kafai@...com>, Song Liu <songliubraving@...com>,
        John Fastabend <john.fastabend@...il.com>,
        KP Singh <kpsingh@...nel.org>, netdev@...r.kernel.org,
        bpf@...r.kernel.org, LKML <linux-kernel@...r.kernel.org>,
        Arnaldo Carvalho de Melo <acme@...nel.org>,
        Peter Zijlstra <peterz@...radead.org>,
        Eugene Loh <eugene.loh@...cle.com>, Hao Luo <haoluo@...gle.com>
Subject: Re: [PATCH 2/2] bpf/selftests: Test skipping stacktrace



On 3/10/22 12:22 AM, Namhyung Kim wrote:
> Add a test case for stacktrace with skip > 0 using a small-sized
> buffer.  It didn't support skipping entries greater than or equal to
> the size of the buffer and filled the skipped part with zeros.
> 
> Signed-off-by: Namhyung Kim <namhyung@...nel.org>
> ---
>   .../bpf/prog_tests/stacktrace_map_skip.c      | 72 ++++++++++++++++
>   .../selftests/bpf/progs/stacktrace_map_skip.c | 82 +++++++++++++++++++
>   2 files changed, 154 insertions(+)
>   create mode 100644 tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c
>   create mode 100644 tools/testing/selftests/bpf/progs/stacktrace_map_skip.c
> 
> diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c
> new file mode 100644
> index 000000000000..bcb244aa3c78
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c
> @@ -0,0 +1,72 @@
> +// SPDX-License-Identifier: GPL-2.0
> +#include <test_progs.h>
> +#include "stacktrace_map_skip.skel.h"
> +
> +#define TEST_STACK_DEPTH  2
> +
> +void test_stacktrace_map_skip(void)
> +{
> +	struct stacktrace_map_skip *skel;
> +	int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
> +	int err, stack_trace_len;
> +	__u32 key, val, duration = 0;
> +
> +	skel = stacktrace_map_skip__open_and_load();
> +	if (CHECK(!skel, "skel_open_and_load", "skeleton open failed\n"))
> +		return;

Please use ASSERT_* macros instead of CHECK* macros.
You can see other prog_tests/*.c files for examples.
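For example, the open/load, map fd and attach checks could look
something like this (untested, just to show the pattern; the name
strings passed to the ASSERT_* macros are arbitrary):

	skel = stacktrace_map_skip__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
		return;

	control_map_fd = bpf_map__fd(skel->maps.control_map);
	if (!ASSERT_GE(control_map_fd, 0, "control_map_fd"))
		goto out;

	err = stacktrace_map_skip__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto out;

With ASSERT_* macros the 'duration' variable is not needed any more.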

> +
> +	/* find map fds */
> +	control_map_fd = bpf_map__fd(skel->maps.control_map);
> +	if (CHECK_FAIL(control_map_fd < 0))
> +		goto out;
> +
> +	stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap);
> +	if (CHECK_FAIL(stackid_hmap_fd < 0))
> +		goto out;
> +
> +	stackmap_fd = bpf_map__fd(skel->maps.stackmap);
> +	if (CHECK_FAIL(stackmap_fd < 0))
> +		goto out;
> +
> +	stack_amap_fd = bpf_map__fd(skel->maps.stack_amap);
> +	if (CHECK_FAIL(stack_amap_fd < 0))
> +		goto out;
> +
> +	err = stacktrace_map_skip__attach(skel);
> +	if (CHECK(err, "skel_attach", "skeleton attach failed\n"))
> +		goto out;
> +
> +	/* give some time for the bpf program to run */
> +	sleep(1);
> +
> +	/* disable stack trace collection */
> +	key = 0;
> +	val = 1;
> +	bpf_map_update_elem(control_map_fd, &key, &val, 0);
> +
> +	/* for every element in stackid_hmap, we can find a corresponding one
> +	 * in stackmap, and vice versa.
> +	 */
> +	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
> +	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
> +		  "err %d errno %d\n", err, errno))
> +		goto out;
> +
> +	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
> +	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
> +		  "err %d errno %d\n", err, errno))
> +		goto out;
> +
> +	stack_trace_len = TEST_STACK_DEPTH * sizeof(__u64);
> +	err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
> +	if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
> +		  "err %d errno %d\n", err, errno))
> +		goto out;
> +
> +	if (CHECK(skel->bss->failed, "check skip",
> +		  "failed to skip some depth: %d", skel->bss->failed))
> +		goto out;
> +
> +out:
> +	stacktrace_map_skip__destroy(skel);
> +}
> diff --git a/tools/testing/selftests/bpf/progs/stacktrace_map_skip.c b/tools/testing/selftests/bpf/progs/stacktrace_map_skip.c
> new file mode 100644
> index 000000000000..323248b17ae4
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/progs/stacktrace_map_skip.c
> @@ -0,0 +1,82 @@
> +// SPDX-License-Identifier: GPL-2.0
> +#include <vmlinux.h>
> +#include <bpf/bpf_helpers.h>
> +
> +#define TEST_STACK_DEPTH         2
> +
> +struct {
> +	__uint(type, BPF_MAP_TYPE_ARRAY);
> +	__uint(max_entries, 1);
> +	__type(key, __u32);
> +	__type(value, __u32);
> +} control_map SEC(".maps");

You can use a global variable for this.
The global variable can be assigned a value (if needed, e.g., non-zero)
before skeleton open and load.
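Something along these lines (untested sketch, 'control' is just an
arbitrary name) should work in the bpf program:

	int control = 0;

	SEC("tracepoint/sched/sched_switch")
	int oncpu(struct sched_switch_args *ctx)
	{
		if (control)
			return 0;	/* replaces the control_map lookup */
		/* rest of the program stays the same */
		...
	}

and then the user-space test can simply do

	/* disable stack trace collection */
	skel->bss->control = 1;

instead of bpf_map_update_elem() on control_map.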

> +
> +struct {
> +	__uint(type, BPF_MAP_TYPE_HASH);
> +	__uint(max_entries, 16384);
> +	__type(key, __u32);
> +	__type(value, __u32);
> +} stackid_hmap SEC(".maps");
> +
> +typedef __u64 stack_trace_t[TEST_STACK_DEPTH];
> +
> +struct {
> +	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
> +	__uint(max_entries, 16384);
> +	__type(key, __u32);
> +	__type(value, stack_trace_t);
> +} stackmap SEC(".maps");
> +
> +struct {
> +	__uint(type, BPF_MAP_TYPE_ARRAY);
> +	__uint(max_entries, 16384);
> +	__type(key, __u32);
> +	__type(value, stack_trace_t);
> +} stack_amap SEC(".maps");
> +
> +/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */
> +struct sched_switch_args {
> +	unsigned long long pad;
> +	char prev_comm[TASK_COMM_LEN];
> +	int prev_pid;
> +	int prev_prio;
> +	long long prev_state;
> +	char next_comm[TASK_COMM_LEN];
> +	int next_pid;
> +	int next_prio;
> +};

You can use the following structure from vmlinux.h instead of the above:
struct trace_event_raw_sched_switch {
         struct trace_entry ent;
         char prev_comm[16];
         pid_t prev_pid;
         int prev_prio;
         long int prev_state;
         char next_comm[16];
         pid_t next_pid;
         int next_prio;
         char __data[0];
};
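
With that, the program can take the vmlinux.h type directly and the
hand-written sched_switch_args definition can be dropped, e.g.
(untested):

	SEC("tracepoint/sched/sched_switch")
	int oncpu(struct trace_event_raw_sched_switch *ctx)
	{
		...
	}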


> +
> +int failed = 0;
> +
> +SEC("tracepoint/sched/sched_switch")
> +int oncpu(struct sched_switch_args *ctx)
> +{
> +	__u32 max_len = TEST_STACK_DEPTH * sizeof(__u64);
> +	__u32 key = 0, val = 0, *value_p;
> +	__u64 *stack_p;
> +
> +	value_p = bpf_map_lookup_elem(&control_map, &key);
> +	if (value_p && *value_p)
> +		return 0; /* skip if non-zero *value_p */
> +
> +	/* it should allow skipping whole buffer size entries */
> +	key = bpf_get_stackid(ctx, &stackmap, TEST_STACK_DEPTH);
> +	if ((int)key >= 0) {
> +		/* The size of stackmap and stack_amap should be the same */
> +		bpf_map_update_elem(&stackid_hmap, &key, &val, 0);
> +		stack_p = bpf_map_lookup_elem(&stack_amap, &key);
> +		if (stack_p) {
> +			bpf_get_stack(ctx, stack_p, max_len, TEST_STACK_DEPTH);
> +			/* it wrongly skipped all the entries and filled the buffer with zeros */
> +			if (stack_p[0] == 0)
> +				failed = 1;
> +		}
> +	} else if ((int)key == -14/*EFAULT*/) {
> +		/* old kernel doesn't support skipping that many entries */
> +		failed = 2;

The selftest is supposed to run against a kernel built from the same
code base, so it is okay to drop the above 'if' check and just set
failed = 2 in the else branch.
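I.e. something like (untested):

	} else {
		/* bpf_get_stackid() is not expected to fail here */
		failed = 2;
	}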

> +	}
> +
> +	return 0;
> +}
> +
> +char _license[] SEC("license") = "GPL";
