Message-Id: <20240620-fault-injection-statickeys-v2-3-e23947d3d84b@suse.cz>
Date: Thu, 20 Jun 2024 00:48:57 +0200
From: Vlastimil Babka <vbabka@...e.cz>
To: Akinobu Mita <akinobu.mita@...il.com>, Christoph Lameter <cl@...ux.com>,
David Rientjes <rientjes@...gle.com>, Alexei Starovoitov <ast@...nel.org>,
Daniel Borkmann <daniel@...earbox.net>, Andrii Nakryiko <andrii@...nel.org>,
"Naveen N. Rao" <naveen.n.rao@...ux.ibm.com>,
Anil S Keshavamurthy <anil.s.keshavamurthy@...el.com>,
"David S. Miller" <davem@...emloft.net>,
Masami Hiramatsu <mhiramat@...nel.org>,
Steven Rostedt <rostedt@...dmis.org>, Mark Rutland <mark.rutland@....com>
Cc: Jiri Olsa <jolsa@...nel.org>, Roman Gushchin <roman.gushchin@...ux.dev>,
Hyeonggon Yoo <42.hyeyoo@...il.com>, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, bpf@...r.kernel.org, linux-trace-kernel@...r.kernel.org,
Vlastimil Babka <vbabka@...e.cz>
Subject: [PATCH v2 3/7] bpf: support error injection static keys for
perf_event attached progs
Functions marked for error injection can have an associated static key
that guards their callsite(s), to avoid the overhead of calling an empty
function when no error injection is in progress.
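For illustration, a guarded callsite can look roughly like the sketch
below. The function, key and caller names are made up for this example;
DEFINE_STATIC_KEY_FALSE(), static_branch_unlikely() and
ALLOW_ERROR_INJECTION() are the existing <linux/jump_label.h> and
<linux/error-injection.h> primitives, and earlier patches in this series
add the means to associate such a key with the injectable function so
that get_injection_key() can look it up:

  DEFINE_STATIC_KEY_FALSE(my_fail_active);

  noinline bool my_should_fail(void)
  {
          /* empty unless fault injection overrides the return value */
          return false;
  }
  ALLOW_ERROR_INJECTION(my_should_fail, TRUE);

  int my_operation(void)
  {
          /* NOP-patched branch while my_fail_active is disabled */
          if (static_branch_unlikely(&my_fail_active))
                  if (my_should_fail())
                          return -ENOMEM;

          /* ... normal work ... */
          return 0;
  }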
Outside of the error injection framework itself, bpf programs can be
attached to perf events and override the results of error-injectable
functions. To make sure these functions are actually called in that
case, attaching such bpf programs should control the associated static
key accordingly.
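For context, a minimal libbpf-style program of this kind could look as
follows (a sketch only; should_failslab() stands in for any
error-injectable function, and a nonzero override value is what makes
that particular function report a failure). Using bpf_override_return()
is what sets prog->kprobe_override, and requires
CONFIG_BPF_KPROBE_OVERRIDE. Attaching the program as a kprobe goes
through a perf event, i.e. through perf_event_attach_bpf_prog() modified
below:

  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  SEC("kprobe/should_failslab")
  int BPF_KPROBE(fail_slab_allocs)
  {
          /* make the probed function report a failure */
          bpf_override_return(ctx, 1);
          return 0;
  }

  char LICENSE[] SEC("license") = "GPL";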
Therefore, add the static key's address to struct trace_kprobe and fill
it in trace_kprobe_error_injectable(), using get_injection_key() instead
of within_error_injection_list(). Introduce
trace_kprobe_error_injection_control() to control the static key, and
call it when attaching or detaching programs with kprobe_override to or
from perf events.
Signed-off-by: Vlastimil Babka <vbabka@...e.cz>
---
kernel/trace/bpf_trace.c | 6 ++++++
kernel/trace/trace_kprobe.c | 30 ++++++++++++++++++++++++++++--
kernel/trace/trace_probe.h | 5 +++++
3 files changed, 39 insertions(+), 2 deletions(-)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index f5154c051d2c..944de1c41209 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -2283,6 +2283,9 @@ int perf_event_attach_bpf_prog(struct perf_event *event,
rcu_assign_pointer(event->tp_event->prog_array, new_array);
bpf_prog_array_free_sleepable(old_array);
+ if (prog->kprobe_override)
+ trace_kprobe_error_injection_control(event->tp_event, true);
+
unlock:
mutex_unlock(&bpf_event_mutex);
return ret;
@@ -2299,6 +2302,9 @@ void perf_event_detach_bpf_prog(struct perf_event *event)
if (!event->prog)
goto unlock;
+ if (event->prog->kprobe_override)
+ trace_kprobe_error_injection_control(event->tp_event, false);
+
old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
if (ret == -ENOENT)
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 16383247bdbf..1c1ee95bd5de 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -61,6 +61,7 @@ struct trace_kprobe {
unsigned long __percpu *nhit;
const char *symbol; /* symbol name */
struct trace_probe tp;
+ struct static_key *ei_key;
};
static bool is_trace_kprobe(struct dyn_event *ev)
@@ -235,9 +236,34 @@ bool trace_kprobe_on_func_entry(struct trace_event_call *call)
bool trace_kprobe_error_injectable(struct trace_event_call *call)
{
struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
+ struct static_key *ei_key;
- return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
- false;
+ if (!tk)
+ return false;
+
+ ei_key = get_injection_key(trace_kprobe_address(tk));
+ if (IS_ERR(ei_key))
+ return false;
+
+ tk->ei_key = ei_key;
+ return true;
+}
+
+void trace_kprobe_error_injection_control(struct trace_event_call *call,
+ bool enable)
+{
+ struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
+
+ if (!tk)
+ return;
+
+ if (!tk->ei_key)
+ return;
+
+ if (enable)
+ static_key_slow_inc(tk->ei_key);
+ else
+ static_key_slow_dec(tk->ei_key);
}
static int register_kprobe_event(struct trace_kprobe *tk);
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 5803e6a41570..d9ddcabb9f97 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -212,6 +212,8 @@ DECLARE_BASIC_PRINT_TYPE_FUNC(symbol);
#ifdef CONFIG_KPROBE_EVENTS
bool trace_kprobe_on_func_entry(struct trace_event_call *call);
bool trace_kprobe_error_injectable(struct trace_event_call *call);
+void trace_kprobe_error_injection_control(struct trace_event_call *call,
+ bool enabled);
#else
static inline bool trace_kprobe_on_func_entry(struct trace_event_call *call)
{
@@ -222,6 +224,9 @@ static inline bool trace_kprobe_error_injectable(struct trace_event_call *call)
{
return false;
}
+
+static inline void trace_kprobe_error_injection_control(struct trace_event_call *call,
+ bool enabled) { }
#endif /* CONFIG_KPROBE_EVENTS */
struct probe_arg {
--
2.45.2