lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20180419043745.a23qak7peaurmiqg@ast-mbp>
Date:   Wed, 18 Apr 2018 21:37:46 -0700
From:   Alexei Starovoitov <alexei.starovoitov@...il.com>
To:     Yonghong Song <yhs@...com>
Cc:     ast@...com, daniel@...earbox.net, netdev@...r.kernel.org,
        kernel-team@...com
Subject: Re: [PATCH bpf-next v2 7/9] samples/bpf: add a test for
 bpf_get_stack helper

On Wed, Apr 18, 2018 at 09:54:42AM -0700, Yonghong Song wrote:
> The test attached a kprobe program to kernel function sys_write.
> It tested to get stack for user space, kernel space and user
> space with build_id request. It also tested to get user
> and kernel stack into the same buffer with back-to-back
> bpf_get_stack helper calls.
> 
> Whenever the kernel stack is available, the user space
> application will check to ensure that sys_write/SyS_write
> is part of the stack.
> 
> Signed-off-by: Yonghong Song <yhs@...com>
> ---
>  samples/bpf/Makefile               |   4 +
>  samples/bpf/trace_get_stack_kern.c |  86 +++++++++++++++++++++
>  samples/bpf/trace_get_stack_user.c | 150 +++++++++++++++++++++++++++++++++++++
>  3 files changed, 240 insertions(+)

Since perf_read is being refactored out of trace_output_user.c in the
previous patch, please move it to selftests (instead of bpf_load.c) and
move this whole test to selftests as well.

>  create mode 100644 samples/bpf/trace_get_stack_kern.c
>  create mode 100644 samples/bpf/trace_get_stack_user.c
> 
> diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
> index 4d6a6ed..94e7b10 100644
> --- a/samples/bpf/Makefile
> +++ b/samples/bpf/Makefile
> @@ -44,6 +44,7 @@ hostprogs-y += xdp_monitor
>  hostprogs-y += xdp_rxq_info
>  hostprogs-y += syscall_tp
>  hostprogs-y += cpustat
> +hostprogs-y += trace_get_stack
>  
>  # Libbpf dependencies
>  LIBBPF := ../../tools/lib/bpf/bpf.o ../../tools/lib/bpf/nlattr.o
> @@ -95,6 +96,7 @@ xdp_monitor-objs := bpf_load.o $(LIBBPF) xdp_monitor_user.o
>  xdp_rxq_info-objs := bpf_load.o $(LIBBPF) xdp_rxq_info_user.o
>  syscall_tp-objs := bpf_load.o $(LIBBPF) syscall_tp_user.o
>  cpustat-objs := bpf_load.o $(LIBBPF) cpustat_user.o
> +trace_get_stack-objs := bpf_load.o $(LIBBPF) trace_get_stack_user.o
>  
>  # Tell kbuild to always build the programs
>  always := $(hostprogs-y)
> @@ -148,6 +150,7 @@ always += xdp_rxq_info_kern.o
>  always += xdp2skb_meta_kern.o
>  always += syscall_tp_kern.o
>  always += cpustat_kern.o
> +always += trace_get_stack_kern.o
>  
>  HOSTCFLAGS += -I$(objtree)/usr/include
>  HOSTCFLAGS += -I$(srctree)/tools/lib/
> @@ -193,6 +196,7 @@ HOSTLOADLIBES_xdp_monitor += -lelf
>  HOSTLOADLIBES_xdp_rxq_info += -lelf
>  HOSTLOADLIBES_syscall_tp += -lelf
>  HOSTLOADLIBES_cpustat += -lelf
> +HOSTLOADLIBES_trace_get_stack += -lelf
>  
>  # Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline:
>  #  make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
> diff --git a/samples/bpf/trace_get_stack_kern.c b/samples/bpf/trace_get_stack_kern.c
> new file mode 100644
> index 0000000..665e4ad
> --- /dev/null
> +++ b/samples/bpf/trace_get_stack_kern.c
> @@ -0,0 +1,86 @@
> +// SPDX-License-Identifier: GPL-2.0
> +
> +#include <linux/ptrace.h>
> +#include <linux/version.h>
> +#include <uapi/linux/bpf.h>
> +#include "bpf_helpers.h"
> +
> +/* Permit pretty deep stack traces */
> +#define MAX_STACK 100
> +struct stack_trace_t {
> +	int pid;
> +	int kern_stack_size;
> +	int user_stack_size;
> +	int user_stack_buildid_size;
> +	u64 kern_stack[MAX_STACK];
> +	u64 user_stack[MAX_STACK];
> +	struct bpf_stack_build_id user_stack_buildid[MAX_STACK];
> +};
> +
> +struct bpf_map_def SEC("maps") perfmap = {
> +	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
> +	.key_size = sizeof(int),
> +	.value_size = sizeof(u32),
> +	.max_entries = 2,
> +};
> +
> +struct bpf_map_def SEC("maps") stackdata_map = {
> +	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
> +	.key_size = sizeof(u32),
> +	.value_size = sizeof(struct stack_trace_t),
> +	.max_entries = 1,
> +};
> +
> +struct bpf_map_def SEC("maps") rawdata_map = {
> +	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
> +	.key_size = sizeof(u32),
> +	.value_size = MAX_STACK * sizeof(u64) * 2,
> +	.max_entries = 1,
> +};
> +
> +SEC("kprobe/sys_write")
> +int bpf_prog1(struct pt_regs *ctx)
> +{
> +	int max_len, max_buildid_len, usize, ksize, total_size;
> +	struct stack_trace_t *data;
> +	void *raw_data;
> +	u32 key = 0;
> +
> +	data = bpf_map_lookup_elem(&stackdata_map, &key);
> +	if (!data)
> +		return 0;
> +
> +	max_len = MAX_STACK * sizeof(u64);
> +	max_buildid_len = MAX_STACK * sizeof(struct bpf_stack_build_id);
> +	data->pid = bpf_get_current_pid_tgid();
> +	data->kern_stack_size = bpf_get_stack(ctx, data->kern_stack,
> +					      max_len, 0);
> +	data->user_stack_size = bpf_get_stack(ctx, data->user_stack, max_len,
> +					    BPF_F_USER_STACK);
> +	data->user_stack_buildid_size = bpf_get_stack(
> +		ctx, data->user_stack_buildid, max_buildid_len,
> +		BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);
> +	bpf_perf_event_output(ctx, &perfmap, 0, data, sizeof(*data));
> +
> +	/* write both kernel and user stacks to the same buffer */
> +	raw_data = bpf_map_lookup_elem(&rawdata_map, &key);
> +	if (!raw_data)
> +		return 0;
> +
> +	usize = bpf_get_stack(ctx, raw_data, max_len, BPF_F_USER_STACK);
> +	if (usize < 0)
> +		return 0;
> +
> +	ksize = bpf_get_stack(ctx, raw_data + usize, max_len - usize, 0);
> +	if (ksize < 0)
> +		return 0;
> +
> +	total_size = usize + ksize;
> +	if (total_size > 0 && total_size <= max_len)
> +		bpf_perf_event_output(ctx, &perfmap, 0, raw_data, total_size);
> +
> +	return 0;
> +}
> +
> +char _license[] SEC("license") = "GPL";
> +u32 _version SEC("version") = LINUX_VERSION_CODE;
> diff --git a/samples/bpf/trace_get_stack_user.c b/samples/bpf/trace_get_stack_user.c
> new file mode 100644
> index 0000000..f64f5a5
> --- /dev/null
> +++ b/samples/bpf/trace_get_stack_user.c
> @@ -0,0 +1,150 @@
> +// SPDX-License-Identifier: GPL-2.0
> +
> +#include <stdio.h>
> +#include <unistd.h>
> +#include <stdlib.h>
> +#include <stdbool.h>
> +#include <string.h>
> +#include <fcntl.h>
> +#include <poll.h>
> +#include <linux/perf_event.h>
> +#include <linux/bpf.h>
> +#include <errno.h>
> +#include <assert.h>
> +#include <sys/syscall.h>
> +#include <sys/ioctl.h>
> +#include <sys/mman.h>
> +#include <time.h>
> +#include <signal.h>
> +#include "libbpf.h"
> +#include "bpf_load.h"
> +#include "perf-sys.h"
> +
> +static int pmu_fd;
> +
> +#define MAX_CNT 10ull
> +#define MAX_STACK 100
> +struct stack_trace_t {
> +	int pid;
> +	int kern_stack_size;
> +	int user_stack_size;
> +	int user_stack_buildid_size;
> +	__u64 kern_stack[MAX_STACK];
> +	__u64 user_stack[MAX_STACK];
> +	struct bpf_stack_build_id user_stack_buildid[MAX_STACK];
> +};
> +
> +static void print_bpf_output(void *data, int size)
> +{
> +	struct stack_trace_t *e = data;
> +	int i, num_stack;
> +	static __u64 cnt;
> +	bool found = false;
> +
> +	cnt++;
> +
> +	if (size < sizeof(struct stack_trace_t)) {
> +		__u64 *raw_data = data;
> +
> +		num_stack = size / sizeof(__u64);
> +		printf("sample size = %d, raw stack\n\t", size);
> +		for (i = 0; i < num_stack; i++) {
> +			struct ksym *ks = ksym_search(raw_data[i]);
> +
> +			printf("0x%llx ", raw_data[i]);
> +			if (ks && (strcmp(ks->name, "sys_write") == 0 ||
> +				   strcmp(ks->name, "SyS_write") == 0))
> +				found = true;
> +		}
> +		printf("\n");
> +	} else {
> +		printf("sample size = %d, pid %d\n", size, e->pid);
> +		if (e->kern_stack_size > 0) {
> +			num_stack = e->kern_stack_size / sizeof(__u64);
> +			printf("\tkernel_stack(%d): ", num_stack);
> +			for (i = 0; i < num_stack; i++) {
> +				struct ksym *ks = ksym_search(e->kern_stack[i]);
> +
> +				printf("0x%llx ", e->kern_stack[i]);
> +				if (ks && (strcmp(ks->name, "sys_write") == 0 ||
> +					   strcmp(ks->name, "SyS_write") == 0))
> +					found = true;
> +			}
> +			printf("\n");
> +		}
> +		if (e->user_stack_size > 0) {
> +			num_stack = e->user_stack_size / sizeof(__u64);
> +			printf("\tuser_stack(%d): ", num_stack);
> +			for (i = 0; i < num_stack; i++)
> +				printf("0x%llx ", e->user_stack[i]);
> +			printf("\n");
> +		}
> +		if (e->user_stack_buildid_size > 0) {
> +			num_stack = e->user_stack_buildid_size /
> +				    sizeof(struct bpf_stack_build_id);
> +			printf("\tuser_stack_buildid(%d): ", num_stack);
> +			for (i = 0; i < num_stack; i++) {
> +				int j;
> +
> +				printf("(%d, 0x", e->user_stack_buildid[i].status);
> +				for (j = 0; j < BPF_BUILD_ID_SIZE; j++)
> +					printf("%02x", e->user_stack_buildid[i].build_id[j]);
> +				printf(", %llx) ", e->user_stack_buildid[i].offset);
> +			}
> +			printf("\n");
> +		}
> +	}
> +	if (!found) {
> +		printf("received %lld events, kern symbol not found, exiting ...\n", cnt);
> +		kill(0, SIGINT);
> +	}
> +
> +	if (cnt == MAX_CNT) {
> +		printf("received max %lld events, exiting ...\n", cnt);
> +		kill(0, SIGINT);
> +	}
> +}
> +
> +static void test_bpf_perf_event(void)
> +{
> +	struct perf_event_attr attr = {
> +		.sample_type = PERF_SAMPLE_RAW,
> +		.type = PERF_TYPE_SOFTWARE,
> +		.config = PERF_COUNT_SW_BPF_OUTPUT,
> +	};
> +	int key = 0;
> +
> +	pmu_fd = sys_perf_event_open(&attr, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0);
> +
> +	assert(pmu_fd >= 0);
> +	assert(bpf_map_update_elem(map_fd[0], &key, &pmu_fd, BPF_ANY) == 0);
> +	ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
> +}
> +
> +static void action(void)
> +{
> +	FILE *f;
> +
> +	f = popen("taskset 1 dd if=/dev/zero of=/dev/null", "r");
> +	(void) f;
> +}
> +
> +int main(int argc, char **argv)
> +{
> +	char filename[256];
> +
> +	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
> +
> +	if (load_kallsyms()) {
> +		printf("failed to process /proc/kallsyms\n");
> +		return 2;
> +	}
> +
> +	if (load_bpf_file(filename)) {
> +		printf("%s", bpf_log_buf);
> +		return 1;
> +	}
> +
> +	test_bpf_perf_event();
> +	return perf_event_poller(pmu_fd, action, print_bpf_output);
> +}
> -- 
> 2.9.5
> 

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ