Message-ID: <20200723180648.1429892-3-songliubraving@fb.com>
Date: Thu, 23 Jul 2020 11:06:45 -0700
From: Song Liu <songliubraving@...com>
To: <linux-kernel@...r.kernel.org>, <bpf@...r.kernel.org>,
<netdev@...r.kernel.org>
CC: <ast@...nel.org>, <daniel@...earbox.net>, <kernel-team@...com>,
<john.fastabend@...il.com>, <kpsingh@...omium.org>,
<brouer@...hat.com>, <peterz@...radead.org>,
Song Liu <songliubraving@...com>
Subject: [PATCH v5 bpf-next 2/5] bpf: fail PERF_EVENT_IOC_SET_BPF when bpf_get_[stack|stackid] cannot work
bpf_get_[stack|stackid] on perf_events with precise_ip uses the callchain
attached to perf_sample_data. If this callchain is not present, do not
allow attaching a BPF program that calls bpf_get_[stack|stackid] to the
event.
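For illustration only (not part of this patch), a rough sketch of a
perf_event_attr configured so that the callchain is attached to each
sample and the helpers stay usable; the event type, config, and period
below are arbitrary and init_sampling_attr() is a made-up helper:

	#include <string.h>
	#include <linux/perf_event.h>

	/* Illustrative only: the callchain-related fields are the point here. */
	static void init_sampling_attr(struct perf_event_attr *attr)
	{
		memset(attr, 0, sizeof(*attr));
		attr->size = sizeof(*attr);
		attr->type = PERF_TYPE_HARDWARE;
		attr->config = PERF_COUNT_HW_CPU_CYCLES;
		attr->sample_period = 100000;
		attr->precise_ip = 2;				/* precise samples, e.g. PEBS */
		attr->sample_type = PERF_SAMPLE_CALLCHAIN;	/* attach callchain to samples */
		attr->exclude_callchain_kernel = 0;		/* keep kernel frames */
		attr->exclude_callchain_user = 0;		/* keep user frames */
	}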
In the error case, -EPROTO is returned so that libbpf can identify this
error and print a proper hint message.
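Again for illustration only, a sketch of how a standalone loader could
react to the new error; attach_prog() is a made-up helper and the
messages are just an example:

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/perf_event.h>

	/* event_fd: perf_event fd; prog_fd: fd of a loaded BPF program. */
	static int attach_prog(int event_fd, int prog_fd)
	{
		if (ioctl(event_fd, PERF_EVENT_IOC_SET_BPF, prog_fd) == 0)
			return 0;

		if (errno == EPROTO)
			fprintf(stderr,
				"prog uses bpf_get_stack/stackid but the event lacks a full callchain;\n"
				"request PERF_SAMPLE_CALLCHAIN and clear exclude_callchain_{kernel,user}\n");
		else
			fprintf(stderr, "PERF_EVENT_IOC_SET_BPF: %s\n", strerror(errno));
		return -1;
	}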
Signed-off-by: Song Liu <songliubraving@...com>
---
include/linux/filter.h | 3 ++-
kernel/bpf/verifier.c | 3 +++
kernel/events/core.c | 18 ++++++++++++++++++
3 files changed, 23 insertions(+), 1 deletion(-)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 1c6b6d982bf49..135692027dd8b 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -532,7 +532,8 @@ struct bpf_prog {
 				is_func:1,	/* program is a bpf function */
 				kprobe_override:1, /* Do we override a kprobe? */
 				has_callchain_buf:1, /* callchain buffer allocated? */
-				enforce_expected_attach_type:1; /* Enforce expected_attach_type checking at attach time */
+				enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
+				call_get_stack:1; /* Do we call bpf_get_stack() or bpf_get_stackid() */
 	enum bpf_prog_type	type;		/* Type of BPF program */
 	enum bpf_attach_type	expected_attach_type; /* For some prog types */
 	u32			len;		/* Number of filter blocks */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 9a6703bc3f36f..41c9517a505ff 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -4887,6 +4887,9 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
 		env->prog->has_callchain_buf = true;
 	}
 
+	if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack)
+		env->prog->call_get_stack = true;
+
 	if (changes_data)
 		clear_all_pkt_pointers(env);
 	return 0;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 856d98c36f562..ddcfd2fb5cc5a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -9544,6 +9544,24 @@ static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd)
 	if (IS_ERR(prog))
 		return PTR_ERR(prog);
 
+	if (event->attr.precise_ip &&
+	    prog->call_get_stack &&
+	    (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY) ||
+	     event->attr.exclude_callchain_kernel ||
+	     event->attr.exclude_callchain_user)) {
+		/*
+		 * On perf_event with precise_ip, calling bpf_get_stack()
+		 * may trigger unwinder warnings and occasional crashes.
+		 * bpf_get_[stack|stackid] works around this issue by using
+		 * the callchain attached to perf_sample_data. If the
+		 * perf_event does not have the full (kernel and user)
+		 * callchain attached to perf_sample_data, do not allow
+		 * attaching a BPF program that calls bpf_get_[stack|stackid].
+		 */
+		bpf_prog_put(prog);
+		return -EPROTO;
+	}
+
 	event->prog = prog;
 	event->orig_overflow_handler = READ_ONCE(event->overflow_handler);
 	WRITE_ONCE(event->overflow_handler, bpf_overflow_handler);
--
2.24.1