lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Sat, 16 May 2020 01:45:49 +0200
From:   Thomas Gleixner <tglx@...utronix.de>
To:     LKML <linux-kernel@...r.kernel.org>
Cc:     x86@...nel.org, "Paul E. McKenney" <paulmck@...nel.org>,
        Andy Lutomirski <luto@...nel.org>,
        Alexandre Chartre <alexandre.chartre@...cle.com>,
        Frederic Weisbecker <frederic@...nel.org>,
        Paolo Bonzini <pbonzini@...hat.com>,
        Sean Christopherson <sean.j.christopherson@...el.com>,
        Masami Hiramatsu <mhiramat@...nel.org>,
        Petr Mladek <pmladek@...e.com>,
        Steven Rostedt <rostedt@...dmis.org>,
        Joel Fernandes <joel@...lfernandes.org>,
        Boris Ostrovsky <boris.ostrovsky@...cle.com>,
        Juergen Gross <jgross@...e.com>,
        Brian Gerst <brgerst@...il.com>,
        Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
        Josh Poimboeuf <jpoimboe@...hat.com>,
        Will Deacon <will@...nel.org>,
        Tom Lendacky <thomas.lendacky@....com>,
        Wei Liu <wei.liu@...nel.org>,
        Michael Kelley <mikelley@...rosoft.com>,
        Jason Chen CJ <jason.cj.chen@...el.com>,
        Zhao Yakui <yakui.zhao@...el.com>,
        "Peter Zijlstra (Intel)" <peterz@...radead.org>
Subject: [patch V6 02/37] tracing/hwlat: Split ftrace_nmi_enter/exit()

The hardware latency tracer calls into timekeeping and ends up in
various instrumentable functions, which is problematic vs. the kprobe
handling, especially the text poke machinery. It's invoked from
nmi_enter/exit(), i.e. non-instrumentable code.

Split it into two parts:

 1) NMI counter, only invoked on nmi_enter() and noinstr safe

 2) NMI timestamping, to be invoked from instrumentable code

Move it into the RCU-is-watching regions of nmi_enter/exit(). There is
no actual RCU dependency right now, but there is also no point in
having it early.

The actual split of nmi_enter/exit() is done in a separate step.

Requested-by: Steven Rostedt <rostedt@...dmis.org>
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
---
 include/linux/ftrace_irq.h |   31 +++++++++++++++++++------------
 include/linux/hardirq.h    |    5 +++--
 kernel/trace/trace_hwlat.c |   19 ++++++++++++-------
 3 files changed, 34 insertions(+), 21 deletions(-)

--- a/include/linux/ftrace_irq.h
+++ b/include/linux/ftrace_irq.h
@@ -4,23 +4,30 @@
 
 #ifdef CONFIG_HWLAT_TRACER
 extern bool trace_hwlat_callback_enabled;
-extern void trace_hwlat_callback(bool enter);
-#endif
+extern void trace_hwlat_count_nmi(void);
+extern void trace_hwlat_timestamp(bool enter);
 
-static inline void ftrace_nmi_enter(void)
+static __always_inline void ftrace_count_nmi(void)
 {
-#ifdef CONFIG_HWLAT_TRACER
-	if (trace_hwlat_callback_enabled)
-		trace_hwlat_callback(true);
-#endif
+	if (unlikely(trace_hwlat_callback_enabled))
+		trace_hwlat_count_nmi();
 }
 
-static inline void ftrace_nmi_exit(void)
+static __always_inline void ftrace_nmi_handler_enter(void)
 {
-#ifdef CONFIG_HWLAT_TRACER
-	if (trace_hwlat_callback_enabled)
-		trace_hwlat_callback(false);
-#endif
+	if (unlikely(trace_hwlat_callback_enabled))
+		trace_hwlat_timestamp(true);
 }
 
+static __always_inline void ftrace_nmi_handler_exit(void)
+{
+	if (unlikely(trace_hwlat_callback_enabled))
+		trace_hwlat_timestamp(false);
+}
+#else /* CONFIG_HWLAT_TRACER */
+static inline void ftrace_count_nmi(void) {}
+static inline void ftrace_nmi_handler_enter(void) {}
+static inline void ftrace_nmi_handler_exit(void) {}
+#endif
+
 #endif /* _LINUX_FTRACE_IRQ_H */
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -82,20 +82,21 @@ extern void irq_exit(void);
 		arch_nmi_enter();				\
 		printk_nmi_enter();				\
 		lockdep_off();					\
-		ftrace_nmi_enter();				\
 		BUG_ON(in_nmi() == NMI_MASK);			\
 		__preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);	\
 		rcu_nmi_enter();				\
 		lockdep_hardirq_enter();			\
+		ftrace_count_nmi();				\
+		ftrace_nmi_handler_enter();			\
 	} while (0)
 
 #define nmi_exit()						\
 	do {							\
+		ftrace_nmi_handler_exit();			\
 		lockdep_hardirq_exit();				\
 		rcu_nmi_exit();					\
 		BUG_ON(!in_nmi());				\
 		__preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET);	\
-		ftrace_nmi_exit();				\
 		lockdep_on();					\
 		printk_nmi_exit();				\
 		arch_nmi_exit();				\
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -132,21 +132,26 @@ static void trace_hwlat_sample(struct hw
 }
 
 /*
+ * Count NMIs in nmi_enter(). Does not take timestamps
+ * because the timestamping callchain cannot be invoked
+ * from noinstr sections.
+ */
+noinstr void trace_hwlat_count_nmi(void)
+{
+	if (smp_processor_id() == nmi_cpu)
+		nmi_count++;
+}
+
+/*
  * Timestamping uses ktime_get_mono_fast(), the NMI safe access to
  * CLOCK_MONOTONIC.
  */
-void trace_hwlat_callback(bool enter)
+void trace_hwlat_timestamp(bool enter)
 {
-	if (smp_processor_id() != nmi_cpu)
-		return;
-
 	if (enter)
 		nmi_ts_start = ktime_get_mono_fast_ns();
 	else
 		nmi_total_ts += ktime_get_mono_fast_ns() - nmi_ts_start;
-
-	if (enter)
-		nmi_count++;
 }
 
 /**

Powered by blists - more mailing lists