Message-Id: <20190708163121.18477-11-krzesimir@kinvolk.io>
Date: Mon, 8 Jul 2019 18:31:19 +0200
From: Krzesimir Nowak <krzesimir@...volk.io>
To: linux-kernel@...r.kernel.org
Cc: Alban Crequy <alban@...volk.io>,
Iago López Galeiras <iago@...volk.io>,
Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>,
Martin KaFai Lau <kafai@...com>,
Song Liu <songliubraving@...com>, Yonghong Song <yhs@...com>,
"David S. Miller" <davem@...emloft.net>,
Jakub Kicinski <jakub.kicinski@...ronome.com>,
Jesper Dangaard Brouer <hawk@...nel.org>,
John Fastabend <john.fastabend@...il.com>,
Stanislav Fomichev <sdf@...gle.com>, netdev@...r.kernel.org,
bpf@...r.kernel.org, xdp-newbies@...r.kernel.org,
Krzesimir Nowak <krzesimir@...volk.io>
Subject: [bpf-next v3 10/12] bpf: Implement bpf_prog_test_run for perf event programs
As input, a test run for a perf event program takes a struct
bpf_perf_event_data as ctx_in and a struct bpf_perf_event_value as
data_in. It produces no output: requests for ctx_out or data_out are
rejected with -EINVAL.
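For example, userspace could drive such a test run roughly like this
(a minimal sketch against the raw bpf(2) syscall; run_pe_test() is a
hypothetical helper, prog_fd is assumed to be a loaded perf event
program, and error handling is mostly elided):

  #include <unistd.h>
  #include <sys/syscall.h>
  #include <linux/bpf.h>
  #include <linux/bpf_perf_event.h>

  int run_pe_test(int prog_fd)
  {
          struct bpf_perf_event_data ctx = {};
          struct bpf_perf_event_value value = {};
          union bpf_attr attr = {};

          ctx.sample_period = 1;  /* consumed via perf_sample_data_init() */
          value.counter = 1;      /* visible via bpf_perf_prog_read_value() */
          value.enabled = 2;
          value.running = 3;

          attr.test.prog_fd = prog_fd;
          attr.test.ctx_in = (__u64)(unsigned long)&ctx;
          attr.test.ctx_size_in = sizeof(ctx);
          attr.test.data_in = (__u64)(unsigned long)&value;
          attr.test.data_size_in = sizeof(value);
          attr.test.repeat = 1;

          if (syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr)))
                  return -1;
          /* on success, attr.test.retval holds the program's return value */
          return attr.test.retval;
  }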
The implementation fills in an instance of struct
bpf_perf_event_data_kern so that a BPF program reading data from its
context receives what was passed to the test run in ctx_in. The BPF
program can also call bpf_perf_prog_read_value() to retrieve what was
passed in data_in.
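On the BPF side, a program along these lines (again only a sketch,
built against the selftests' bpf_helpers.h; the checked values match
the hypothetical inputs above) can observe both channels:

  #include <linux/bpf.h>
  #include <linux/bpf_perf_event.h>
  #include "bpf_helpers.h"

  SEC("perf_event")
  int pe_test(struct bpf_perf_event_data *ctx)
  {
          struct bpf_perf_event_value value = {};

          /* context reads are remapped onto the ctx_in buffer */
          if (ctx->sample_period != 1)
                  return 1;
          /* the helper copies out what was passed as data_in */
          if (bpf_perf_prog_read_value(ctx, &value, sizeof(value)))
                  return 2;
          return value.counter == 1 ? 0 : 3;
  }

  char _license[] SEC("license") = "GPL";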
Changes since v2:
- drop the changes in the perf event verifier test - they are no
  longer needed after the reworked ctx size handling
Signed-off-by: Krzesimir Nowak <krzesimir@...volk.io>
---
kernel/trace/bpf_trace.c | 60 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 60 insertions(+)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index ca1255d14576..b870fc2314d0 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -19,6 +19,8 @@
#include "trace_probe.h"
#include "trace.h"
+#include <trace/events/bpf_test_run.h>
+
#define bpf_event_rcu_dereference(p) \
rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
@@ -1160,7 +1162,65 @@ const struct bpf_verifier_ops perf_event_verifier_ops = {
.convert_ctx_access = pe_prog_convert_ctx_access,
};
+static int pe_prog_test_run(struct bpf_prog *prog,
+			    const union bpf_attr *kattr,
+			    union bpf_attr __user *uattr)
+{
+	struct bpf_perf_event_data_kern real_ctx = {0, };
+	struct perf_sample_data sample_data = {0, };
+	struct bpf_perf_event_data *fake_ctx;
+	struct bpf_perf_event_value *value;
+	struct perf_event event = {0, };
+	u32 retval = 0, duration = 0;
+	int err;
+
+	if (kattr->test.data_size_out || kattr->test.data_out)
+		return -EINVAL;
+	if (kattr->test.ctx_size_out || kattr->test.ctx_out)
+		return -EINVAL;
+
+	fake_ctx = bpf_receive_ctx(kattr, sizeof(struct bpf_perf_event_data));
+	if (IS_ERR(fake_ctx))
+		return PTR_ERR(fake_ctx);
+
+	value = bpf_receive_data(kattr, sizeof(struct bpf_perf_event_value));
+	if (IS_ERR(value)) {
+		kfree(fake_ctx);
+		return PTR_ERR(value);
+	}
+
+	real_ctx.regs = &fake_ctx->regs;
+	real_ctx.data = &sample_data;
+	real_ctx.event = &event;
+	perf_sample_data_init(&sample_data, fake_ctx->addr,
+			      fake_ctx->sample_period);
+	event.cpu = smp_processor_id();
+	event.oncpu = -1;
+	event.state = PERF_EVENT_STATE_OFF;
+	local64_set(&event.count, value->counter);
+	event.total_time_enabled = value->enabled;
+	event.total_time_running = value->running;
+	/* make the event its own group leader - group_leader is used
+	 * only for checking the state field
+	 */
+	event.group_leader = &event;
+	err = bpf_test_run(prog, &real_ctx, kattr->test.repeat,
+			   BPF_TEST_RUN_PLAIN, &retval, &duration);
+	if (err) {
+		kfree(value);
+		kfree(fake_ctx);
+		return err;
+	}
+
+	err = bpf_test_finish(uattr, retval, duration);
+	trace_bpf_test_finish(&err);
+	kfree(value);
+	kfree(fake_ctx);
+	return err;
+}
+
const struct bpf_prog_ops perf_event_prog_ops = {
+	.test_run = pe_prog_test_run,
};
static DEFINE_MUTEX(bpf_event_mutex);
--
2.20.1