Message-ID: <07a50582-1c2f-7f45-c7dd-5ff9c2ff3052@oracle.com>
Date: Tue, 10 Mar 2020 11:55:57 +0100
From: Alexandre Chartre <alexandre.chartre@...cle.com>
To: Thomas Gleixner <tglx@...utronix.de>,
LKML <linux-kernel@...r.kernel.org>
Cc: x86@...nel.org, Steven Rostedt <rostedt@...dmis.org>,
Brian Gerst <brgerst@...il.com>,
Juergen Gross <jgross@...e.com>,
Frederic Weisbecker <frederic@...nel.org>
Subject: Re: [patch part-II V2 08/13] tracing: Provide lockdep less
trace_hardirqs_on/off() variants
On 3/8/20 11:24 PM, Thomas Gleixner wrote:
> trace_hardirqs_on/off() is only partially safe vs. RCU idle. The tracer
> core itself is safe, but the resulting tracepoints can be utilized by
> e.g. BPF, which is unsafe.
>
> Provide variants which do not contain the lockdep invocation so the lockdep
> and tracer invocations can be split at the call site and placed properly.
>
> The new variants also do not use rcuidle as they are going to be called
> from entry code after/before context tracking.
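
Just to check my understanding: at e.g. the user-mode entry points the
split would then presumably look something like this (sketch only, the
exact placement is up to the entry code):

	lockdep_hardirqs_off(CALLER_ADDR0);	/* lockdep first, RCU may still be idle */
	user_exit_irqoff();			/* context tracking, RCU starts watching */
	__trace_hardirqs_off();			/* tracer last, tracepoints (BPF) now safe */
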
>
> Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
> ---
> V2: New patch
> ---
>  include/linux/irqflags.h        |    4 ++++
>  kernel/trace/trace_preemptirq.c |   23 +++++++++++++++++++++++
>  2 files changed, 27 insertions(+)
>
> --- a/include/linux/irqflags.h
> +++ b/include/linux/irqflags.h
> @@ -29,6 +29,8 @@
>  #endif
>
>  #ifdef CONFIG_TRACE_IRQFLAGS
> +  extern void __trace_hardirqs_on(void);
> +  extern void __trace_hardirqs_off(void);
>    extern void trace_hardirqs_on(void);
>    extern void trace_hardirqs_off(void);
>  # define trace_hardirq_context(p)	((p)->hardirq_context)
> @@ -52,6 +54,8 @@ do { \
>  	current->softirq_context--;		\
>  } while (0)
>  #else
> +# define __trace_hardirqs_on()		do { } while (0)
> +# define __trace_hardirqs_off()		do { } while (0)
>  # define trace_hardirqs_on()		do { } while (0)
>  # define trace_hardirqs_off()		do { } while (0)
>  # define trace_hardirq_context(p)	0
> --- a/kernel/trace/trace_preemptirq.c
> +++ b/kernel/trace/trace_preemptirq.c
> @@ -19,6 +19,17 @@
>  /* Per-cpu variable to prevent redundant calls when IRQs already off */
>  static DEFINE_PER_CPU(int, tracing_irq_cpu);
>
> +void __trace_hardirqs_on(void)
> +{
> +	if (this_cpu_read(tracing_irq_cpu)) {
> +		if (!in_nmi())
> +			trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
> +		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
> +		this_cpu_write(tracing_irq_cpu, 0);
> +	}
> +}
> +NOKPROBE_SYMBOL(__trace_hardirqs_on);
> +
Shouldn't trace_hardirqs_on() be updated to call __trace_hardirqs_on()?
It's the same code except for the lockdep call:
void trace_hardirqs_on(void)
{
	__trace_hardirqs_on();
	lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
NOKPROBE_SYMBOL(trace_hardirqs_on);
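
(One thing I'm not sure about: CALLER_ADDR0/CALLER_ADDR1 as seen from
inside __trace_hardirqs_on() would point at trace_hardirqs_on() itself
rather than at its caller, so the helpers would presumably need to be
__always_inline, or take the caller addresses as arguments, to keep the
reported call sites accurate.)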
>  void trace_hardirqs_on(void)
>  {
>  	if (this_cpu_read(tracing_irq_cpu)) {
> @@ -33,6 +44,18 @@ void trace_hardirqs_on(void)
>  EXPORT_SYMBOL(trace_hardirqs_on);
>  NOKPROBE_SYMBOL(trace_hardirqs_on);
>
> +void __trace_hardirqs_off(void)
> +{
> +	if (!this_cpu_read(tracing_irq_cpu)) {
> +		this_cpu_write(tracing_irq_cpu, 1);
> +		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
> +		if (!in_nmi())
> +			trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
> +	}
> +
> +}
> +NOKPROBE_SYMBOL(__trace_hardirqs_off);
> +
Same comment here.
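
I.e. presumably something like this, with the same caller-address
caveat as above:

void trace_hardirqs_off(void)
{
	__trace_hardirqs_off();
	lockdep_hardirqs_off(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_off);
NOKPROBE_SYMBOL(trace_hardirqs_off);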
alex.
>  void trace_hardirqs_off(void)
>  {
>  	if (!this_cpu_read(tracing_irq_cpu)) {
>