Message-Id: <20191229143740.29143-3-jolsa@kernel.org>
Date: Sun, 29 Dec 2019 15:37:37 +0100
From: Jiri Olsa <jolsa@...nel.org>
To: Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>
Cc: netdev@...r.kernel.org, bpf@...r.kernel.org,
Andrii Nakryiko <andriin@...com>, Yonghong Song <yhs@...com>,
Martin KaFai Lau <kafai@...com>,
Jakub Kicinski <jakub.kicinski@...ronome.com>,
David Miller <davem@...hat.com>
Subject: [PATCH 2/5] bpf: Add bpf_perf_event_output_kfunc
Add support for using perf_event_output in
BPF_TRACE_FENTRY/BPF_TRACE_FEXIT programs.

There are no pt_regs available in the trampoline, so the
helper takes a scratch struct pt_regs from the per-cpu
bpf_kfunc_regs array and fills it with perf_fetch_caller_regs().
The array holds three entries so that nested invocations
(e.g. a program firing in interrupt or NMI context on top of
another one) each get their own slot.
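
With this in place, an fentry program can stream data to user
space through a perf event array. A minimal sketch (the attach
point, map and program names are illustrative, written against
libbpf's bpf_helpers.h):

  // SPDX-License-Identifier: GPL-2.0
  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  /* perf event array the samples are pushed into */
  struct {
          __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
          __uint(key_size, sizeof(int));
          __uint(value_size, sizeof(int));
  } events SEC(".maps");

  SEC("fentry/ksys_dup")
  int trace_dup(void *ctx)
  {
          int cpu = bpf_get_smp_processor_id();

          /* resolved to bpf_perf_event_output_kfunc below */
          bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
                                &cpu, sizeof(cpu));
          return 0;
  }

  /* the proto is gpl_only, so a GPL license is required */
  char _license[] SEC("license") = "GPL";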
Signed-off-by: Jiri Olsa <jolsa@...nel.org>
---
kernel/trace/bpf_trace.c | 67 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 67 insertions(+)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index e5ef4ae9edb5..1b270bbd9016 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1151,6 +1151,69 @@ raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
}
}
+struct bpf_kfunc_regs {
+ struct pt_regs regs[3];
+};
+
+static DEFINE_PER_CPU(struct bpf_kfunc_regs, bpf_kfunc_regs);
+static DEFINE_PER_CPU(int, bpf_kfunc_nest_level);
+
+static struct pt_regs *get_bpf_kfunc_regs(void)
+{
+ struct bpf_kfunc_regs *tp_regs = this_cpu_ptr(&bpf_kfunc_regs);
+ int nest_level = this_cpu_inc_return(bpf_kfunc_nest_level);
+
+ if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
+ this_cpu_dec(bpf_kfunc_nest_level);
+ return ERR_PTR(-EBUSY);
+ }
+
+ return &tp_regs->regs[nest_level - 1];
+}
+
+static void put_bpf_kfunc_regs(void)
+{
+ this_cpu_dec(bpf_kfunc_nest_level);
+}
+
+BPF_CALL_5(bpf_perf_event_output_kfunc, void *, ctx, struct bpf_map *, map,
+ u64, flags, void *, data, u64, size)
+{
+ struct pt_regs *regs = get_bpf_kfunc_regs();
+ int ret;
+
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ perf_fetch_caller_regs(regs);
+ ret = ____bpf_perf_event_output(regs, map, flags, data, size);
+
+ put_bpf_kfunc_regs();
+ return ret;
+}
+
+static const struct bpf_func_proto bpf_perf_event_output_proto_kfunc = {
+ .func = bpf_perf_event_output_kfunc,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_CONST_MAP_PTR,
+ .arg3_type = ARG_ANYTHING,
+ .arg4_type = ARG_PTR_TO_MEM,
+ .arg5_type = ARG_CONST_SIZE_OR_ZERO,
+};
+
+static const struct bpf_func_proto *
+kfunc_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+ switch (func_id) {
+ case BPF_FUNC_perf_event_output:
+ return &bpf_perf_event_output_proto_kfunc;
+ default:
+ return tracing_func_proto(func_id, prog);
+ }
+}
+
static const struct bpf_func_proto *
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
@@ -1160,6 +1223,10 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_skb_output_proto;
#endif
default:
+ if (prog->expected_attach_type == BPF_TRACE_FENTRY ||
+ prog->expected_attach_type == BPF_TRACE_FEXIT)
+ return kfunc_prog_func_proto(func_id, prog);
+
return raw_tp_prog_func_proto(func_id, prog);
}
}
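
On the user-space side, such a program could be attached and
the ring buffer drained roughly as follows (a sketch against
the libbpf API current at the time of this series; the object
file name and program title are made up to match the sample in
the changelog):

  #include <stdio.h>
  #include <bpf/libbpf.h>

  static void on_sample(void *ctx, int cpu, void *data, __u32 size)
  {
          printf("cpu %d: %u bytes\n", cpu, size);
  }

  int main(void)
  {
          struct perf_buffer_opts pb_opts = { .sample_cb = on_sample };
          struct bpf_object *obj;
          struct bpf_program *prog;
          struct perf_buffer *pb;
          int map_fd;

          obj = bpf_object__open_file("fentry_output.o", NULL);
          if (libbpf_get_error(obj) || bpf_object__load(obj))
                  return 1;

          prog = bpf_object__find_program_by_title(obj, "fentry/ksys_dup");
          if (!prog || libbpf_get_error(bpf_program__attach_trace(prog)))
                  return 1;

          map_fd = bpf_object__find_map_fd_by_name(obj, "events");
          pb = perf_buffer__new(map_fd, 8 /* pages per cpu */, &pb_opts);
          if (libbpf_get_error(pb))
                  return 1;

          while (perf_buffer__poll(pb, 100 /* ms */) >= 0)
                  ;
          return 0;
  }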
--
2.21.1