Message-Id: <1510471708-12729-1-git-send-email-yupeng0921@gmail.com>
Date: Sun, 12 Nov 2017 07:28:24 +0000
From: yupeng0921@gmail.com
To: linux-kernel@vger.kernel.org
Cc: ast@kernel.org, daniel@iogearbox.net, rostedt@goodmis.org,
	mingo@redhat.com, yupeng0921@gmail.com
Subject: [ftrace-bpf 1/5] add BPF_PROG_TYPE_FTRACE to bpf
Add a new type, BPF_PROG_TYPE_FTRACE, to bpf so that a bpf prog can be
attached to ftrace. Ftrace passes the function parameters to the bpf
prog, and the bpf prog returns 1 or 0 to indicate whether ftrace should
trace this function (an illustrative filter prog is sketched after the
list below). The major purpose is to provide an accurate way to trigger
the function graph tracer. Changes in code:
1. Add FTRACE_BPF_FILTER to kernel/trace/Kconfig. Letting ftrace pass
function parameters to bpf requires architecture dependent code, so
this feature is only enabled when it is both selected in Kconfig and
supported by the architecture. An architecture that supports this
feature should define a macro named FTRACE_BPF_FILTER (see the sketch
after this list), so that other code in the kernel can check whether
FTRACE_BPF_FILTER is defined to know whether the feature is really
enabled.
2. Add BPF_PROG_TYPE_FTRACE to enum bpf_prog_type.
3. Check the kernel version when loading a BPF_PROG_TYPE_FTRACE bpf prog.
4. Define ftrace_prog_func_proto. The prog input is a pointer to struct
ftrace_regs, which is similar to pt_regs for kprobe and is architecture
dependent. If an architecture doesn't define FTRACE_BPF_FILTER, an
empty ftrace_prog_ops is used instead.
5. Add a BPF_PROG_TYPE() entry for BPF_PROG_TYPE_FTRACE to bpf_types.h.
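As an illustration of point 1, an architecture that supports this
feature would advertise it from its own ftrace header. The header path,
macro value, and register layout below are only a sketch (x86-64-like)
and are not part of this patch; the real architecture support comes in
a later patch of this series:

  /* e.g. in arch/x86/include/asm/ftrace.h (illustrative only) */
  #define FTRACE_BPF_FILTER 1

  /* a possible ftrace_regs carrying the register-passed arguments;
   * the real layout is architecture dependent
   */
  struct ftrace_regs {
  	unsigned long di, si, dx, cx, r8, r9;
  };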
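For reference, a minimal filter prog of the new type could look like
the sketch below (samples/bpf style). The section name, the target
tgid, and the choice of bpf_get_current_pid_tgid() as the filter
condition are assumptions made only for illustration; the prog just
returns 1 (trace) or 0 (skip) as described above and does not touch the
ftrace_regs context:

  #include <uapi/linux/bpf.h>
  #include <linux/version.h>
  #include "bpf_helpers.h"	/* samples/bpf: SEC(), bpf_get_current_pid_tgid() */

  struct ftrace_regs;		/* layout is architecture dependent */

  #define TARGET_TGID 1234	/* hypothetical: only trace functions hit by this task */

  SEC("ftrace/filter")		/* hypothetical section name */
  int ftrace_filter_prog(struct ftrace_regs *ctx)
  {
  	/* 1 tells ftrace to trace this function, 0 skips it */
  	return (bpf_get_current_pid_tgid() >> 32) == TARGET_TGID;
  }

  char _license[] SEC("license") = "GPL";
  __u32 _version SEC("version") = LINUX_VERSION_CODE;

As with kprobe progs, the loader has to fill in kern_version (the
"version" section above), which is what the check in point 3 enforces.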
Signed-off-by: yupeng0921@gmail.com
---
 include/linux/bpf_types.h |  3 +++
 include/uapi/linux/bpf.h  |  1 +
 kernel/bpf/syscall.c      |  8 +++++---
 kernel/trace/Kconfig      |  7 +++++++
 kernel/trace/bpf_trace.c  | 42 ++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 58 insertions(+), 3 deletions(-)
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index e114932..c828904 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -19,6 +19,9 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe_prog_ops)
 BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint_prog_ops)
 BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event_prog_ops)
 #endif
+#ifdef CONFIG_FTRACE_BPF_FILTER
+BPF_PROG_TYPE(BPF_PROG_TYPE_FTRACE, ftrace_prog_ops)
+#endif
 
 BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
 BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops)
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 30f2ce7..cced53c 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -118,6 +118,7 @@ enum bpf_prog_type {
 	BPF_PROG_TYPE_UNSPEC,
 	BPF_PROG_TYPE_SOCKET_FILTER,
 	BPF_PROG_TYPE_KPROBE,
+	BPF_PROG_TYPE_FTRACE,
 	BPF_PROG_TYPE_SCHED_CLS,
 	BPF_PROG_TYPE_SCHED_ACT,
 	BPF_PROG_TYPE_TRACEPOINT,
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 25d0749..f73f951 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1001,9 +1001,11 @@ static int bpf_prog_load(union bpf_attr *attr)
 	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
 		return -E2BIG;
 
-	if (type == BPF_PROG_TYPE_KPROBE &&
-	    attr->kern_version != LINUX_VERSION_CODE)
-		return -EINVAL;
+	if (type == BPF_PROG_TYPE_KPROBE || type == BPF_PROG_TYPE_FTRACE) {
+		if (attr->kern_version != LINUX_VERSION_CODE) {
+			return -EINVAL;
+		}
+	}
 
 	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
 	    type != BPF_PROG_TYPE_CGROUP_SKB &&
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 434c840..dde0f01 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -702,6 +702,13 @@ config TRACING_EVENTS_GPIO
 	help
 	  Enable tracing events for gpio subsystem
 
+config FTRACE_BPF_FILTER
+	bool "filter function trace by bpf"
+	depends on FUNCTION_GRAPH_TRACER
+	default y
+	help
+	  Enable the kernel to trigger ftrace by a bpf program
+
 endif # FTRACE
 
 endif # TRACING_SUPPORT
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index dc498b6..46e74d1 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -667,3 +667,45 @@ const struct bpf_verifier_ops perf_event_prog_ops = {
 	.is_valid_access = pe_prog_is_valid_access,
 	.convert_ctx_access = pe_prog_convert_ctx_access,
 };
+
+#ifdef FTRACE_BPF_FILTER
+static const struct bpf_func_proto *ftrace_prog_func_proto(
+	enum bpf_func_id func_id)
+{
+	switch (func_id) {
+	case BPF_FUNC_perf_event_output:
+		return &bpf_perf_event_output_proto;
+	case BPF_FUNC_get_stackid:
+		return &bpf_get_stackid_proto;
+	default:
+		return tracing_func_proto(func_id);
+	}
+}
+
+static bool ftrace_prog_is_valid_access(int off, int size,
+					enum bpf_access_type type,
+					struct bpf_insn_access_aux *info)
+{
+	if (off < 0 || off >= sizeof(struct ftrace_regs))
+		return false;
+	if (type != BPF_READ)
+		return false;
+	if (off % size != 0)
+		return false;
+	/*
+	 * Assertion for 32 bit to make sure last 8 byte access
+	 * (BPF_DW) to the last 4 byte member is disallowed.
+	 */
+	if (off + size > sizeof(struct ftrace_regs))
+		return false;
+
+	return true;
+}
+
+const struct bpf_verifier_ops ftrace_prog_ops = {
+	.get_func_proto = ftrace_prog_func_proto,
+	.is_valid_access = ftrace_prog_is_valid_access,
+};
+#else
+const struct bpf_verifier_ops ftrace_prog_ops;
+#endif /* FTRACE_BPF_FILTER */
--
2.7.4