Message-Id: <20210218222125.46565-6-mjeanson@efficios.com>
Date: Thu, 18 Feb 2021 17:21:24 -0500
From: Michael Jeanson <mjeanson@efficios.com>
To: linux-kernel@vger.kernel.org
Cc: Michael Jeanson <mjeanson@efficios.com>,
	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>,
	Steven Rostedt <rostedt@goodmis.org>,
	Peter Zijlstra <peterz@infradead.org>,
	Alexei Starovoitov <ast@kernel.org>,
	Yonghong Song <yhs@fb.com>,
	"Paul E. McKenney" <paulmck@kernel.org>,
	Ingo Molnar <mingo@redhat.com>,
	Arnaldo Carvalho de Melo <acme@kernel.org>,
	Mark Rutland <mark.rutland@arm.com>,
	Alexander Shishkin <alexander.shishkin@linux.intel.com>,
	Jiri Olsa <jolsa@redhat.com>,
	Namhyung Kim <namhyung@kernel.org>, bpf@vger.kernel.org,
	Joel Fernandes <joel@joelfernandes.org>
Subject: [RFC PATCH 5/6] tracing: convert sys_enter/exit to faultable tracepoints

Convert the definition of the system call enter/exit tracepoints to
faultable tracepoints, now that all upstream tracers can handle probes
that may take a page fault.
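
Every converted probe below follows the same shape; as a minimal
illustrative sketch (the probe name and the enabled check are
placeholders, not part of this patch):

	/*
	 * With a faultable (mayfault) tracepoint the probe now runs
	 * with preemption enabled, so preemption is disabled
	 * explicitly around the ring buffer and per-cpu accesses,
	 * and every early return becomes a goto to the enable path.
	 */
	static void my_probe(void *data, struct pt_regs *regs, long id)
	{
		preempt_disable_notrace();

		if (!my_event_enabled(id))	/* placeholder for the real checks */
			goto end;

		/* ... reserve, fill and commit the trace event ... */
	end:
		preempt_enable_notrace();
	}

Tracers then register these probes through the mayfault variant of the
registration API (register_trace_mayfault_sys_enter/exit below) instead
of register_trace_sys_enter/exit.
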
Co-developed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Yonghong Song <yhs@fb.com>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: bpf@vger.kernel.org
Cc: Joel Fernandes <joel@joelfernandes.org>
---
 include/trace/events/syscalls.h |  4 +-
 kernel/trace/trace_syscalls.c   | 84 +++++++++++++++++++++++----------
 2 files changed, 60 insertions(+), 28 deletions(-)

diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h
index b6e0cbc2c71f..2bd2d94563a2 100644
--- a/include/trace/events/syscalls.h
+++ b/include/trace/events/syscalls.h
@@ -15,7 +15,7 @@
 
 #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
 
-TRACE_EVENT_FN(sys_enter,
+TRACE_EVENT_FN_MAYFAULT(sys_enter,
 
 	TP_PROTO(struct pt_regs *regs, long id),
 
@@ -41,7 +41,7 @@ TRACE_EVENT_FN(sys_enter,
 
 TRACE_EVENT_FLAGS(sys_enter, TRACE_EVENT_FL_CAP_ANY)
 
-TRACE_EVENT_FN(sys_exit,
+TRACE_EVENT_FN_MAYFAULT(sys_exit,
 
 	TP_PROTO(struct pt_regs *regs, long ret),
 
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index d85a2f0f316b..4ca9190e26b2 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -304,21 +304,27 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 	int syscall_nr;
 	int size;
 
+	/*
+	 * Probe called with preemption enabled (mayfault), but ring buffer and
+	 * per-cpu data require preemption to be disabled.
+	 */
+	preempt_disable_notrace();
+
 	syscall_nr = trace_get_syscall_nr(current, regs);
 	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
-		return;
+		goto end;
 
 	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
 	trace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
 	if (!trace_file)
-		return;
+		goto end;
 
 	if (trace_trigger_soft_disabled(trace_file))
-		return;
+		goto end;
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
 	if (!sys_data)
-		return;
+		goto end;
 
 	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
 
@@ -329,7 +335,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 	event = trace_buffer_lock_reserve(buffer,
 			sys_data->enter_event->event.type, size, irq_flags, pc);
 	if (!event)
-		return;
+		goto end;
 
 	entry = ring_buffer_event_data(event);
 	entry->nr = syscall_nr;
@@ -338,6 +344,8 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 
 	event_trigger_unlock_commit(trace_file, buffer, event, entry,
 				    irq_flags, pc);
+end:
+	preempt_enable_notrace();
 }
 
 static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
@@ -352,21 +360,27 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 	int pc;
 	int syscall_nr;
 
+	/*
+	 * Probe called with preemption enabled (mayfault), but ring buffer and
+	 * per-cpu data require preemption to be disabled.
+	 */
+	preempt_disable_notrace();
+
 	syscall_nr = trace_get_syscall_nr(current, regs);
 	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
-		return;
+		goto end;
 
 	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
 	trace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
 	if (!trace_file)
-		return;
+		goto end;
 
 	if (trace_trigger_soft_disabled(trace_file))
-		return;
+		goto end;
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
 	if (!sys_data)
-		return;
+		goto end;
 
 	local_save_flags(irq_flags);
 	pc = preempt_count();
@@ -376,7 +390,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 			sys_data->exit_event->event.type, sizeof(*entry),
 			irq_flags, pc);
 	if (!event)
-		return;
+		goto end;
 
 	entry = ring_buffer_event_data(event);
 	entry->nr = syscall_nr;
@@ -384,6 +398,8 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 
 	event_trigger_unlock_commit(trace_file, buffer, event, entry,
 				    irq_flags, pc);
+end:
+	preempt_enable_notrace();
 }
 
 static int reg_event_syscall_enter(struct trace_event_file *file,
@@ -398,7 +414,7 @@ static int reg_event_syscall_enter(struct trace_event_file *file,
 		return -ENOSYS;
 	mutex_lock(&syscall_trace_lock);
 	if (!tr->sys_refcount_enter)
-		ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
+		ret = register_trace_mayfault_sys_enter(ftrace_syscall_enter, tr);
 	if (!ret) {
 		rcu_assign_pointer(tr->enter_syscall_files[num], file);
 		tr->sys_refcount_enter++;
@@ -436,7 +452,7 @@ static int reg_event_syscall_exit(struct trace_event_file *file,
 		return -ENOSYS;
 	mutex_lock(&syscall_trace_lock);
 	if (!tr->sys_refcount_exit)
-		ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
+		ret = register_trace_mayfault_sys_exit(ftrace_syscall_exit, tr);
 	if (!ret) {
 		rcu_assign_pointer(tr->exit_syscall_files[num], file);
 		tr->sys_refcount_exit++;
@@ -600,20 +616,26 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
 	int rctx;
 	int size;
 
+	/*
+	 * Probe called with preemption enabled (mayfault), but ring buffer and
+	 * per-cpu data require preemption to be disabled.
+	 */
+	preempt_disable_notrace();
+
 	syscall_nr = trace_get_syscall_nr(current, regs);
 	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
-		return;
+		goto end;
 	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
-		return;
+		goto end;
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
 	if (!sys_data)
-		return;
+		goto end;
 
 	head = this_cpu_ptr(sys_data->enter_event->perf_events);
 	valid_prog_array = bpf_prog_array_valid(sys_data->enter_event);
 	if (!valid_prog_array && hlist_empty(head))
-		return;
+		goto end;
 
 	/* get the size after alignment with the u32 buffer size field */
 	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
@@ -622,7 +644,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
 
 	rec = perf_trace_buf_alloc(size, NULL, &rctx);
 	if (!rec)
-		return;
+		goto end;
 
 	rec->nr = syscall_nr;
 	syscall_get_arguments(current, regs, args);
@@ -632,12 +654,14 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
 	    !perf_call_bpf_enter(sys_data->enter_event, regs, sys_data, rec)) ||
 	    hlist_empty(head)) {
 		perf_swevent_put_recursion_context(rctx);
-		return;
+		goto end;
 	}
 
 	perf_trace_buf_submit(rec, size, rctx,
 			      sys_data->enter_event->event.type, 1, regs,
 			      head, NULL);
+end:
+	preempt_enable_notrace();
 }
 
 static int perf_sysenter_enable(struct trace_event_call *call)
@@ -649,7 +673,7 @@ static int perf_sysenter_enable(struct trace_event_call *call)
 
 	mutex_lock(&syscall_trace_lock);
 	if (!sys_perf_refcount_enter)
-		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
+		ret = register_trace_mayfault_sys_enter(perf_syscall_enter, NULL);
 	if (ret) {
 		pr_info("event trace: Could not activate syscall entry trace point");
 	} else {
@@ -699,20 +723,26 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
 	int rctx;
 	int size;
 
+	/*
+	 * Probe called with preemption enabled (mayfault), but ring buffer and
+	 * per-cpu data require preemption to be disabled.
+	 */
+	preempt_disable_notrace();
+
 	syscall_nr = trace_get_syscall_nr(current, regs);
 	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
-		return;
+		goto end;
 	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
-		return;
+		goto end;
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
 	if (!sys_data)
-		return;
+		goto end;
 
 	head = this_cpu_ptr(sys_data->exit_event->perf_events);
 	valid_prog_array = bpf_prog_array_valid(sys_data->exit_event);
 	if (!valid_prog_array && hlist_empty(head))
-		return;
+		goto end;
 
 	/* We can probably do that at build time */
 	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
@@ -720,7 +750,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
 
 	rec = perf_trace_buf_alloc(size, NULL, &rctx);
 	if (!rec)
-		return;
+		goto end;
 
 	rec->nr = syscall_nr;
 	rec->ret = syscall_get_return_value(current, regs);
@@ -729,11 +759,13 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
 	    !perf_call_bpf_exit(sys_data->exit_event, regs, rec)) ||
 	    hlist_empty(head)) {
 		perf_swevent_put_recursion_context(rctx);
-		return;
+		goto end;
 	}
 
 	perf_trace_buf_submit(rec, size, rctx, sys_data->exit_event->event.type,
 			      1, regs, head, NULL);
+end:
+	preempt_enable_notrace();
 }
 
 static int perf_sysexit_enable(struct trace_event_call *call)
@@ -745,7 +777,7 @@ static int perf_sysexit_enable(struct trace_event_call *call)
 
 	mutex_lock(&syscall_trace_lock);
 	if (!sys_perf_refcount_exit)
-		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
+		ret = register_trace_mayfault_sys_exit(perf_syscall_exit, NULL);
 	if (ret) {
 		pr_info("event trace: Could not activate syscall exit trace point");
 	} else {
-- 
2.25.1