Message-Id: <20190625194215.14927-9-krzesimir@kinvolk.io>
Date: Tue, 25 Jun 2019 21:42:13 +0200
From: Krzesimir Nowak <krzesimir@...volk.io>
To: netdev@...r.kernel.org
Cc: Alban Crequy <alban@...volk.io>,
Iago López Galeiras <iago@...volk.io>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Martin KaFai Lau <kafai@...com>,
Song Liu <songliubraving@...com>, Yonghong Song <yhs@...com>,
linux-kernel@...r.kernel.org, bpf@...r.kernel.org,
Krzesimir Nowak <krzesimir@...volk.io>
Subject: [bpf-next v2 08/10] bpf: Implement bpf_prog_test_run for perf event programs

As input, a test run for a perf event program takes struct
bpf_perf_event_data as ctx_in and struct bpf_perf_event_value as
data_in. As output, it ignores both ctx_out and data_out.

The implementation sets up an instance of struct
bpf_perf_event_data_kern so that a BPF program reading data from its
context receives what was passed to the test run in ctx_in, and so
that a call to bpf_perf_prog_read_value receives what was passed in
data_in.
Signed-off-by: Krzesimir Nowak <krzesimir@...volk.io>
---
kernel/trace/bpf_trace.c | 107 ++++++++++++++++++
.../bpf/verifier/perf_event_sample_period.c | 8 ++
2 files changed, 115 insertions(+)
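
For illustration (not part of this patch), a minimal userspace sketch
of driving the new test run via the raw bpf(2) syscall; prog_fd is
assumed to be an already loaded BPF_PROG_TYPE_PERF_EVENT program, the
helper name and sample values are made up, and error handling is
omitted:

	#include <linux/bpf.h>
	#include <linux/bpf_perf_event.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int test_run_pe(int prog_fd)
	{
		/* what the program will see as its context */
		struct bpf_perf_event_data ctx = {};
		/* what bpf_perf_prog_read_value() will return */
		struct bpf_perf_event_value val = {};
		union bpf_attr attr = {};

		ctx.sample_period = 10000;
		val.counter = 42;
		val.enabled = 100;
		val.running = 50;

		attr.test.prog_fd = prog_fd;
		attr.test.ctx_in = (__u64)(unsigned long)&ctx;
		attr.test.ctx_size_in = sizeof(ctx);
		attr.test.data_in = (__u64)(unsigned long)&val;
		attr.test.data_size_in = sizeof(val);
		attr.test.repeat = 1;

		/* on success the kernel writes back attr.test.retval
		 * and attr.test.duration
		 */
		return syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
	}

And a matching sketch of the program side (again hypothetical, using
the selftests' bpf_helpers.h); its return value shows up in
attr.test.retval after the run:

	#include <linux/bpf.h>
	#include <linux/bpf_perf_event.h>
	#include "bpf_helpers.h"

	SEC("perf_event")
	int pe_prog(struct bpf_perf_event_data *ctx)
	{
		struct bpf_perf_event_value value = {};

		if (bpf_perf_prog_read_value(ctx, &value, sizeof(value)))
			return 0;
		/* sees 10000 and 42 with the inputs above */
		return ctx->sample_period == 10000 && value.counter == 42;
	}

	char _license[] SEC("license") = "GPL";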
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index c102c240bb0b..2fa49ea8a475 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -16,6 +16,8 @@
 #include <asm/tlb.h>
 
+#include <trace/events/bpf_test_run.h>
+
 #include "trace_probe.h"
 #include "trace.h"
@@ -1160,7 +1162,112 @@ const struct bpf_verifier_ops perf_event_verifier_ops = {
 	.convert_ctx_access = pe_prog_convert_ctx_access,
 };
 
+static int pe_prog_test_run(struct bpf_prog *prog,
+			    const union bpf_attr *kattr,
+			    union bpf_attr __user *uattr)
+{
+	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
+	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
+	u32 data_size_in = kattr->test.data_size_in;
+	u32 ctx_size_in = kattr->test.ctx_size_in;
+	u32 repeat = kattr->test.repeat;
+	u32 retval = 0, duration = 0;
+	int err = -EINVAL;
+	u64 time_start, time_spent = 0;
+	int i;
+	struct perf_sample_data sample_data = {0, };
+	struct perf_event event = {0, };
+	struct bpf_perf_event_data_kern real_ctx = {0, };
+	struct bpf_perf_event_data fake_ctx = {0, };
+	struct bpf_perf_event_value value = {0, };
+
+	if (ctx_size_in != sizeof(fake_ctx))
+		goto out;
+	if (data_size_in != sizeof(value))
+		goto out;
+
+	if (copy_from_user(&fake_ctx, ctx_in, ctx_size_in)) {
+		err = -EFAULT;
+		goto out;
+	}
+	if (copy_from_user(&value, data_in, data_size_in)) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	real_ctx.regs = &fake_ctx.regs;
+	real_ctx.data = &sample_data;
+	real_ctx.event = &event;
+	perf_sample_data_init(&sample_data, fake_ctx.addr,
+			      fake_ctx.sample_period);
+	event.cpu = smp_processor_id();
+	event.oncpu = -1;
+	event.state = PERF_EVENT_STATE_OFF;
+	local64_set(&event.count, value.counter);
+	event.total_time_enabled = value.enabled;
+	event.total_time_running = value.running;
+	/* make the event its own group leader - it is used only for
+	 * checking the state field
+	 */
+	event.group_leader = &event;
+
+	/* slightly modified copy of the run loop from bpf_test_run()
+	 * in net/bpf/test_run.c
+	 */
+	if (!repeat)
+		repeat = 1;
+
+	rcu_read_lock();
+	preempt_disable();
+	time_start = ktime_get_ns();
+	for (i = 0; i < repeat; i++) {
+		retval = BPF_PROG_RUN(prog, &real_ctx);
+
+		if (signal_pending(current)) {
+			err = -EINTR;
+			preempt_enable();
+			rcu_read_unlock();
+			goto out;
+		}
+
+		if (need_resched()) {
+			time_spent += ktime_get_ns() - time_start;
+			preempt_enable();
+			rcu_read_unlock();
+
+			cond_resched();
+
+			rcu_read_lock();
+			preempt_disable();
+			time_start = ktime_get_ns();
+		}
+	}
+	time_spent += ktime_get_ns() - time_start;
+	preempt_enable();
+	rcu_read_unlock();
+
+	do_div(time_spent, repeat);
+	duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
+	/* end of slightly modified copy of the run loop from
+	 * bpf_test_run() in net/bpf/test_run.c
+	 */
+
+	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval))) {
+		err = -EFAULT;
+		goto out;
+	}
+	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration))) {
+		err = -EFAULT;
+		goto out;
+	}
+	err = 0;
+out:
+	trace_bpf_test_finish(&err);
+	return err;
+}
+
 const struct bpf_prog_ops perf_event_prog_ops = {
+	.test_run = pe_prog_test_run,
 };
 
 static DEFINE_MUTEX(bpf_event_mutex);
diff --git a/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c b/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c
index 471c1a5950d8..16e9e5824d14 100644
--- a/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c
+++ b/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c
@@ -13,6 +13,8 @@
 	},
 	.result = ACCEPT,
 	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
+	.ctx_len = sizeof(struct bpf_perf_event_data),
+	.data_len = sizeof(struct bpf_perf_event_value),
 },
 {
 	"check bpf_perf_event_data->sample_period half load permitted",
@@ -29,6 +31,8 @@
 	},
 	.result = ACCEPT,
 	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
+	.ctx_len = sizeof(struct bpf_perf_event_data),
+	.data_len = sizeof(struct bpf_perf_event_value),
 },
 {
 	"check bpf_perf_event_data->sample_period word load permitted",
@@ -45,6 +49,8 @@
 	},
 	.result = ACCEPT,
 	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
+	.ctx_len = sizeof(struct bpf_perf_event_data),
+	.data_len = sizeof(struct bpf_perf_event_value),
 },
 {
 	"check bpf_perf_event_data->sample_period dword load permitted",
@@ -56,4 +62,6 @@
 	},
 	.result = ACCEPT,
 	.prog_type = BPF_PROG_TYPE_PERF_EVENT,
+	.ctx_len = sizeof(struct bpf_perf_event_data),
+	.data_len = sizeof(struct bpf_perf_event_value),
 },
--
2.20.1