Date:	Fri, 12 Feb 2016 10:11:23 -0600
From:	Tom Zanussi <tom.zanussi@...ux.intel.com>
To:	ast@...mgrid.com, rostedt@...dmis.org
Cc:	masami.hiramatsu.pt@...achi.com, namhyung@...nel.org,
	peterz@...radead.org, linux-kernel@...r.kernel.org,
	Tom Zanussi <tom.zanussi@...ux.intel.com>
Subject: [RFC][PATCH 05/10] eBPF/tracing: Add eBPF trace event field access helpers

Add two eBPF helper functions for accessing trace event contents (a short
usage sketch follows the list):

 - bpf_trace_event_field_read() - read the value of an integer field from a
   trace event record
 - bpf_trace_event_field_read_string() - copy the contents of a string field
   from a trace event record into a supplied buffer

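The usage sketch below is an editorial illustration rather than part of the
patch: it shows a BPF C program calling the two helpers, and it assumes the
trace_event program type and context introduced earlier in this series, the
samples/bpf convention of declaring helpers as function pointers keyed by
their BPF_FUNC_* ids, and a hypothetical section name for attaching to
sched:sched_switch.

  #include <uapi/linux/bpf.h>
  #include "bpf_helpers.h"        /* samples/bpf: SEC() etc. */

  /* Declare the new helpers by their BPF_FUNC_* ids (samples/bpf style). */
  static __u64 (*bpf_trace_event_field_read)(void *ctx, char *field_name) =
          (void *) BPF_FUNC_trace_event_field_read;
  static __u64 (*bpf_trace_event_field_read_string)(void *ctx, char *field_name,
                                                    char *dst, int size) =
          (void *) BPF_FUNC_trace_event_field_read_string;

  SEC("trace_event/sched/sched_switch")   /* hypothetical section name */
  int bpf_prog(void *ctx)
  {
          /* Field names must live on the BPF stack (ARG_PTR_TO_STACK). */
          char pid_field[] = "next_pid";
          char comm_field[] = "next_comm";
          char comm[16] = {0};            /* initialized stack dest buffer */
          __u64 pid;

          pid = bpf_trace_event_field_read(ctx, pid_field);
          if ((__s64)pid < 0)             /* e.g. -ENOENT: no such field */
                  return 0;

          bpf_trace_event_field_read_string(ctx, comm_field, comm, sizeof(comm));

          /* ... filter or aggregate on pid and comm ... */
          return 0;
  }

  char _license[] SEC("license") = "GPL"; /* both helpers are gpl_only */
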
Signed-off-by: Tom Zanussi <tom.zanussi@...ux.intel.com>
---
 include/uapi/linux/bpf.h | 18 ++++++++++
 kernel/bpf/verifier.c    |  2 +-
 kernel/trace/bpf_trace.c | 87 ++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 106 insertions(+), 1 deletion(-)

diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index df6a7ff..4045ce9 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -270,6 +270,24 @@ enum bpf_func_id {
 	 * Return: 0 on success
 	 */
 	BPF_FUNC_perf_event_output,
+
+	/**
+	 * bpf_trace_event_field_read(ctx, field_name) - read trace event field contents
+	 * @ctx: struct trace_event_context*
+	 * @field_name: ftrace_event_field name
+	 * Return: value of field in ctx->record
+	 */
+	BPF_FUNC_trace_event_field_read,
+
+	/**
+	 * bpf_trace_event_field_read_string(ctx, field_name, dest, size) - read trace event field string
+	 * @ctx: struct trace_event_context*
+	 * @field_name: ftrace_event_field name
+	 * @dest: destination string buffer
+	 * @size: destination string buffer size
+	 * Return: 0 on success
+	 */
+	BPF_FUNC_trace_event_field_read_string,
 	__BPF_FUNC_MAX_ID,
 };
 
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index a7945d1..2b877ff 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -245,7 +245,7 @@ static const struct {
 } func_limit[] = {
 	{BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
 	{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
-	{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output},
+	{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_trace_event_field_read_string},
 };
 
 static void print_verifier_state(struct verifier_env *env)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 78dbac0..eb78c28 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -326,6 +326,89 @@ static struct bpf_prog_type_list kprobe_tl = {
 	.type	= BPF_PROG_TYPE_KPROBE,
 };
 
+static u64 bpf_trace_event_field_read(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+	struct trace_event_context *ctx;
+	struct ftrace_event_field *field;
+	char *field_name;
+	u64 val;
+
+	ctx = (struct trace_event_context *) (long) r1;
+	field_name = (char *) (long) r2;
+
+	field = trace_find_event_field(ctx->call, field_name);
+
+	if (unlikely(!field))
+		return -ENOENT;
+
+	val = field->accessor(field, ctx->record);
+
+	return val;
+}
+
+static const struct bpf_func_proto bpf_trace_event_field_read_proto = {
+	.func		= bpf_trace_event_field_read,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_PTR_TO_STACK,
+};
+
+static int event_field_strlen(struct ftrace_event_field *field, void *rec)
+{
+	int size;
+
+	if (field->filter_type == FILTER_DYN_STRING)
+		size = *(u32 *)(rec + field->offset) >> 16;
+	else if (field->filter_type == FILTER_PTR_STRING)
+		size = strlen((char *)(rec + field->offset));
+	else
+		size = field->size;
+
+	return size;
+}
+
+static u64 bpf_trace_event_field_read_string(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+	struct ftrace_event_field *field;
+	struct trace_event_context *ctx;
+	char *field_name;
+	int size, len;
+	void *dst;
+	u64 val;
+
+	ctx = (struct trace_event_context *) (long) r1;
+	field_name = (char *) (long) r2;
+
+	field = trace_find_event_field(ctx->call, field_name);
+	if (unlikely(!field))
+		return -ENOENT;
+
+	val = field->accessor(field, ctx->record);
+
+	dst = (void *)(long)r3;
+	size = (int)r4;
+
+	len = event_field_strlen(field, ctx->record);
+	if (len > size - 1)
+		len = size - 1;
+
+	memset(dst, '\0', size);
+	memcpy(dst, (char *)val, len);
+
+	return 0;
+}
+
+static const struct bpf_func_proto bpf_trace_event_field_read_string_proto = {
+	.func		= bpf_trace_event_field_read_string,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_PTR_TO_STACK,
+	.arg3_type	= ARG_PTR_TO_STACK,
+	.arg4_type	= ARG_CONST_STACK_SIZE,
+};
+
 static const struct bpf_func_proto *
 trace_event_prog_func_proto(enum bpf_func_id func_id)
 {
@@ -356,6 +439,10 @@ trace_event_prog_func_proto(enum bpf_func_id func_id)
 		return &bpf_perf_event_read_proto;
 	case BPF_FUNC_perf_event_output:
 		return &bpf_perf_event_output_proto;
+	case BPF_FUNC_trace_event_field_read:
+		return &bpf_trace_event_field_read_proto;
+	case BPF_FUNC_trace_event_field_read_string:
+		return &bpf_trace_event_field_read_string_proto;
 	default:
 		return NULL;
 	}
-- 
1.9.3
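
One note on the string-length logic above: for FILTER_DYN_STRING fields,
event_field_strlen() is decoding ftrace's __data_loc format, in which the
32-bit value stored at the field's offset packs the payload's offset within
the record into the low 16 bits and its length into the high 16 bits. A
minimal standalone sketch of that decoding (the function and variable names
here are illustrative, not from the patch):

  #include <linux/types.h>

  /*
   * Decode an ftrace __data_loc (dynamic array/string) field.  'rec' points
   * at the start of the trace event record and 'offset' is the
   * ftrace_event_field offset, as used by event_field_strlen() above.
   */
  static void decode_data_loc(void *rec, int offset, char **str, int *len)
  {
          u32 data_loc = *(u32 *)(rec + offset);

          *len = data_loc >> 16;                     /* high 16 bits: length */
          *str = (char *)rec + (data_loc & 0xffff);  /* low 16 bits: offset  */
  }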
