[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <xhsmh7d7hjuhx.mognet@vschneid.remote.csb>
Date: Fri, 22 Apr 2022 11:13:46 +0100
From: Valentin Schneider <vschneid@...hat.com>
To: Delyan Kratunov <delyank@...com>,
"valentin.schneider@....com" <valentin.schneider@....com>
Cc: "bigeasy@...utronix.de" <bigeasy@...utronix.de>,
"dietmar.eggemann@....com" <dietmar.eggemann@....com>,
"keescook@...omium.org" <keescook@...omium.org>,
"andrii@...nel.org" <andrii@...nel.org>,
"vincent.guittot@...aro.org" <vincent.guittot@...aro.org>,
"akpm@...ux-foundation.org" <akpm@...ux-foundation.org>,
"mingo@...nel.org" <mingo@...nel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"rdunlap@...radead.org" <rdunlap@...radead.org>,
"rostedt@...dmis.org" <rostedt@...dmis.org>,
"Kenta.Tada@...y.com" <Kenta.Tada@...y.com>,
"adharmap@...cinc.com" <adharmap@...cinc.com>,
"tglx@...utronix.de" <tglx@...utronix.de>,
"bristot@...hat.com" <bristot@...hat.com>,
"ebiederm@...ssion.com" <ebiederm@...ssion.com>,
"peterz@...radead.org" <peterz@...radead.org>,
"ast@...nel.org" <ast@...nel.org>,
"legion@...nel.org" <legion@...nel.org>,
"ed.tsai@...iatek.com" <ed.tsai@...iatek.com>,
"u.kleine-koenig@...gutronix.de" <u.kleine-koenig@...gutronix.de>,
"juri.lelli@...hat.com" <juri.lelli@...hat.com>,
"x86@...nel.org" <x86@...nel.org>
Subject: Re: [PATCH] sched/tracing: append prev_state to tp args instead
On 21/04/22 22:12, Delyan Kratunov wrote:
> Hi folks,
>
> While working on bpf tooling, we noticed that the sched_switch tracepoint
> signature recently changed in an incompatible manner. This affects the
> runqslower tools in the kernel tree, as well as multiple libbpf tools in iovisor/bcc.
>
Hmph, unfortunate. What should I have run to catch this in the first place?
This doesn't trigger a single warning for me:
$ cd tools/bpf && make runqslower
I'm guessing this is just packaging the tool and the breakage only happens
when the actual bpf syscall happens?
> It would be a fair amount of churn to fix all these tools, not to mention any
> non-public tools people may be using. If you are open to it, here's a
> description and a patch that moves the new argument to the end,
> so existing tools can continue working without change (the new argument
> just won't be extracted in existing programs):
>
> Commit fa2c3254d7cf (sched/tracing: Don't re-read p->state when emitting
> sched_switch event, 2022-01-20) added a new prev_state argument to the
> sched_switch tracepoint, before the prev task_struct pointer.
>
> This reordering of arguments broke BPF programs that use the raw
> tracepoint (e.g. tp_btf programs). The type of the second argument has
> changed and existing programs that assume a task_struct* argument
> (e.g. for bpf_task_storage or member access) will now fail to verify.
>
> If we instead append the new argument to the end, all existing programs
> will continue to work and can conditionally extract the prev_state
> argument on supported kernel versions.
>
Provided this didn't miss any new user of the sched_switch TP (I didn't
find any with rg '\bregister_[A-z,0-9,-,_]+sched_switch'), I'm okay with it
(well, I think this falls into the breaking-change category, so I don't have
much choice, do I :-))
Reviewed-by: Valentin Schneider <vschneid@...hat.com>
> Fixes: fa2c3254d7cf ("sched/tracing: Don't re-read p->state when emitting sched_switch event")
> Signed-off-by: Delyan Kratunov <delyank@...com>
> ---
> include/trace/events/sched.h | 6 +++---
> kernel/sched/core.c | 2 +-
> kernel/trace/fgraph.c | 4 ++--
> kernel/trace/ftrace.c | 4 ++--
> kernel/trace/trace_events.c | 8 ++++----
> kernel/trace/trace_osnoise.c | 4 ++--
> kernel/trace/trace_sched_switch.c | 4 ++--
> kernel/trace/trace_sched_wakeup.c | 4 ++--
> 8 files changed, 18 insertions(+), 18 deletions(-)
>
> diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
> index 65e786756321..fbb99a61f714 100644
> --- a/include/trace/events/sched.h
> +++ b/include/trace/events/sched.h
> @@ -222,11 +222,11 @@ static inline long __trace_sched_switch_state(bool preempt,
> TRACE_EVENT(sched_switch,
>
> TP_PROTO(bool preempt,
> - unsigned int prev_state,
> struct task_struct *prev,
> - struct task_struct *next),
> + struct task_struct *next,
> + unsigned int prev_state),
>
> - TP_ARGS(preempt, prev_state, prev, next),
> + TP_ARGS(preempt, prev, next, prev_state),
>
> TP_STRUCT__entry(
> __array( char, prev_comm, TASK_COMM_LEN )
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 51efaabac3e4..d58c0389eb23 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -6382,7 +6382,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
> migrate_disable_switch(rq, prev);
> psi_sched_switch(prev, next, !task_on_rq_queued(prev));
>
> - trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev_state, prev, next);
> + trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
>
> /* Also unlocks the rq: */
> rq = context_switch(rq, prev, next, &rf);
> diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
> index 8f4fb328133a..a7e84c8543cb 100644
> --- a/kernel/trace/fgraph.c
> +++ b/kernel/trace/fgraph.c
> @@ -404,9 +404,9 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
>
> static void
> ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
> - unsigned int prev_state,
> struct task_struct *prev,
> - struct task_struct *next)
> + struct task_struct *next,
> + unsigned int prev_state)
> {
> unsigned long long timestamp;
> int index;
> diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
> index 4f1d2f5e7263..af899b058c8d 100644
> --- a/kernel/trace/ftrace.c
> +++ b/kernel/trace/ftrace.c
> @@ -7420,9 +7420,9 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
>
> static void
> ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
> - unsigned int prev_state,
> struct task_struct *prev,
> - struct task_struct *next)
> + struct task_struct *next,
> + unsigned int prev_state)
> {
> struct trace_array *tr = data;
> struct trace_pid_list *pid_list;
> diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
> index e11e167b7809..f97de82d1342 100644
> --- a/kernel/trace/trace_events.c
> +++ b/kernel/trace/trace_events.c
> @@ -773,9 +773,9 @@ void trace_event_follow_fork(struct trace_array *tr, bool enable)
>
> static void
> event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
> - unsigned int prev_state,
> struct task_struct *prev,
> - struct task_struct *next)
> + struct task_struct *next,
> + unsigned int prev_state)
> {
> struct trace_array *tr = data;
> struct trace_pid_list *no_pid_list;
> @@ -799,9 +799,9 @@ event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
>
> static void
> event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
> - unsigned int prev_state,
> struct task_struct *prev,
> - struct task_struct *next)
> + struct task_struct *next,
> + unsigned int prev_state)
> {
> struct trace_array *tr = data;
> struct trace_pid_list *no_pid_list;
> diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
> index e9ae1f33a7f0..afb92e2f0aea 100644
> --- a/kernel/trace/trace_osnoise.c
> +++ b/kernel/trace/trace_osnoise.c
> @@ -1168,9 +1168,9 @@ thread_exit(struct osnoise_variables *osn_var, struct task_struct *t)
> */
> static void
> trace_sched_switch_callback(void *data, bool preempt,
> - unsigned int prev_state,
> struct task_struct *p,
> - struct task_struct *n)
> + struct task_struct *n,
> + unsigned int prev_state)
> {
> struct osnoise_variables *osn_var = this_cpu_osn_var();
>
> diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
> index 45796d8bd4b2..c9ffdcfe622e 100644
> --- a/kernel/trace/trace_sched_switch.c
> +++ b/kernel/trace/trace_sched_switch.c
> @@ -22,8 +22,8 @@ static DEFINE_MUTEX(sched_register_mutex);
>
> static void
> probe_sched_switch(void *ignore, bool preempt,
> - unsigned int prev_state,
> - struct task_struct *prev, struct task_struct *next)
> + struct task_struct *prev, struct task_struct *next,
> + unsigned int prev_state)
> {
> int flags;
>
> diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
> index 46429f9a96fa..330aee1c1a49 100644
> --- a/kernel/trace/trace_sched_wakeup.c
> +++ b/kernel/trace/trace_sched_wakeup.c
> @@ -426,8 +426,8 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
>
> static void notrace
> probe_wakeup_sched_switch(void *ignore, bool preempt,
> - unsigned int prev_state,
> - struct task_struct *prev, struct task_struct *next)
> + struct task_struct *prev, struct task_struct *next,
> + unsigned int prev_state)
> {
> struct trace_array_cpu *data;
> u64 T0, T1, delta;
> --
> 2.35.1
Powered by blists - more mailing lists