Message-ID: <20200527155003.485332738@infradead.org>
Date: Wed, 27 May 2020 17:45:33 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: mingo@...nel.org, will@...nel.org, tglx@...utronix.de
Cc: x86@...nel.org, linux-kernel@...r.kernel.org,
a.darwish@...utronix.de, rostedt@...dmis.org,
bigeasy@...utronix.de, peterz@...radead.org
Subject: [PATCH 6/6] x86/entry: Fix NMI vs IRQ state tracking

While the nmi_enter() users did call
trace_hardirqs_{off_finish,on_prepare}(), there were no matching
lockdep_hardirqs_*() calls to complete the picture.

Introduce idtentry_{enter,exit}_nmi() to enable proper IRQ state
tracking across NMIs.
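
As an illustration only (not part of this patch), a minimal sketch of
how an NMI-type entry point is expected to bracket its body with the
new helpers, modelled on the exc_nmi() hunk below. The handler name
exc_example_nmi is made up for the example:

    /*
     * Illustrative only: hypothetical NMI-style entry point using the
     * new helpers; any instrumentable work sits between the
     * instrumentation_begin()/end() markers.
     */
    DEFINE_IDTENTRY_RAW(exc_example_nmi)
    {
            idtentry_enter_nmi(regs);

            instrumentation_begin();
            /* actual handler work goes here */
            instrumentation_end();

            idtentry_exit_nmi(regs);
    }
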
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
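
Not part of the diff, just a review aid: a commented copy of the new
exit helper from the common.c hunk below, spelling out, as I read it,
why the X86_EFLAGS_IF checks and the lockdep calls are ordered the way
they are. The comments are mine, not in the patch.

    noinstr void idtentry_exit_nmi(struct pt_regs *regs)
    {
            /* Leave lockdep's notion of hardirq context first. */
            lockdep_hardirq_exit();

            if (regs->flags & X86_EFLAGS_IF) {
                    /*
                     * The interrupted context had interrupts enabled,
                     * so prepare the hardirqs-on transition for both
                     * tracing and lockdep while instrumentation is
                     * still allowed.
                     */
                    instrumentation_begin();
                    trace_hardirqs_on_prepare();
                    lockdep_hardirqs_on_prepare(CALLER_ADDR0);
                    instrumentation_end();
            }

            __nmi_exit();

            /*
             * Recording hardirqs-on has to wait until __nmi_exit()
             * has re-enabled lockdep via lockdep_on().
             */
            if (regs->flags & X86_EFLAGS_IF)
                    lockdep_hardirqs_on(CALLER_ADDR0);
    }
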
 arch/x86/entry/common.c         |   32 ++++++++++++++++++++++++++++----
 arch/x86/include/asm/idtentry.h |    3 +++
 arch/x86/kernel/nmi.c           |    7 ++-----
 arch/x86/kernel/traps.c         |   21 ++++++---------------
 include/linux/hardirq.h         |   22 ++++++++++++++++------
 5 files changed, 55 insertions(+), 30 deletions(-)
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -550,7 +550,7 @@ SYSCALL_DEFINE0(ni_syscall)
* The return value must be fed into the rcu_exit argument of
* idtentry_exit_cond_rcu().
*/
-bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs)
+noinstr bool idtentry_enter_cond_rcu(struct pt_regs *regs)
{
if (user_mode(regs)) {
enter_from_user_mode();
@@ -619,7 +619,7 @@ static void idtentry_exit_cond_resched(s
* Counterpart to idtentry_enter_cond_rcu(). The return value of the entry
* function must be fed into the @rcu_exit argument.
*/
-void noinstr idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit)
+noinstr void idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit)
{
lockdep_assert_irqs_disabled();
@@ -663,7 +663,7 @@ void noinstr idtentry_exit_cond_rcu(stru
* Invokes enter_from_user_mode() to establish the proper context for
* NOHZ_FULL. Otherwise scheduling on exit would not be possible.
*/
-void noinstr idtentry_enter_user(struct pt_regs *regs)
+noinstr void idtentry_enter_user(struct pt_regs *regs)
{
enter_from_user_mode();
}
@@ -680,13 +680,37 @@ void noinstr idtentry_enter_user(struct
*
* Counterpart to idtentry_enter_user().
*/
-void noinstr idtentry_exit_user(struct pt_regs *regs)
+noinstr void idtentry_exit_user(struct pt_regs *regs)
{
lockdep_assert_irqs_disabled();
prepare_exit_to_usermode(regs);
}
+noinstr void idtentry_enter_nmi(struct pt_regs *regs)
+{
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ __nmi_enter();
+ instrumentation_begin();
+ trace_hardirqs_off_finish();
+ instrumentation_end();
+ lockdep_hardirq_enter();
+}
+
+noinstr void idtentry_exit_nmi(struct pt_regs *regs)
+{
+ lockdep_hardirq_exit();
+ if (regs->flags & X86_EFLAGS_IF) {
+ instrumentation_begin();
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ instrumentation_end();
+ }
+ __nmi_exit();
+ if (regs->flags & X86_EFLAGS_IF)
+ lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
#ifdef CONFIG_XEN_PV
#ifndef CONFIG_PREEMPTION
/*
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -16,6 +16,9 @@ void idtentry_exit_user(struct pt_regs *
bool idtentry_enter_cond_rcu(struct pt_regs *regs);
void idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit);
+void idtentry_enter_nmi(struct pt_regs *regs);
+void idtentry_exit_nmi(struct pt_regs *regs);
+
/**
* DECLARE_IDTENTRY - Declare functions for simple IDT entry points
* No error code pushed by hardware
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -330,7 +330,6 @@ static noinstr void default_do_nmi(struc
__this_cpu_write(last_nmi_rip, regs->ip);
instrumentation_begin();
- trace_hardirqs_off_finish();
handled = nmi_handle(NMI_LOCAL, regs);
__this_cpu_add(nmi_stats.normal, handled);
@@ -417,8 +416,6 @@ static noinstr void default_do_nmi(struc
unknown_nmi_error(reason, regs);
out:
- if (regs->flags & X86_EFLAGS_IF)
- trace_hardirqs_on_prepare();
instrumentation_end();
}
@@ -535,14 +532,14 @@ DEFINE_IDTENTRY_NMI(exc_nmi)
}
#endif
- nmi_enter();
+ idtentry_enter_nmi(regs);
inc_irq_stat(__nmi_count);
if (!ignore_nmis)
default_do_nmi(regs);
- nmi_exit();
+ idtentry_exit_nmi(regs);
#ifdef CONFIG_X86_64
if (unlikely(this_cpu_read(update_debug_stack))) {
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -387,7 +387,8 @@ DEFINE_IDTENTRY_DF(exc_double_fault)
}
#endif
- nmi_enter();
+
+ idtentry_enter_nmi(regs);
instrumentation_begin();
notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
@@ -632,15 +633,12 @@ DEFINE_IDTENTRY_RAW(exc_int3)
instrumentation_end();
idtentry_exit_user(regs);
} else {
- nmi_enter();
+ idtentry_enter_nmi(regs);
instrumentation_begin();
- trace_hardirqs_off_finish();
if (!do_int3(regs))
die("int3", regs, 0);
- if (regs->flags & X86_EFLAGS_IF)
- trace_hardirqs_on_prepare();
instrumentation_end();
- nmi_exit();
+ idtentry_exit_nmi(regs);
}
}
@@ -852,10 +850,7 @@ static void noinstr handle_debug(struct
static __always_inline void exc_debug_kernel(struct pt_regs *regs,
unsigned long dr6)
{
- nmi_enter();
- instrumentation_begin();
- trace_hardirqs_off_finish();
- instrumentation_end();
+ idtentry_enter_nmi(regs);
/*
* The SDM says "The processor clears the BTF flag when it
@@ -878,11 +873,7 @@ static __always_inline void exc_debug_ke
if (dr6)
handle_debug(regs, dr6, false);
- instrumentation_begin();
- if (regs->flags & X86_EFLAGS_IF)
- trace_hardirqs_on_prepare();
- instrumentation_end();
- nmi_exit();
+ idtentry_exit_nmi(regs);
}
static __always_inline void exc_debug_user(struct pt_regs *regs,
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -111,32 +111,42 @@ extern void rcu_nmi_exit(void);
/*
* nmi_enter() can nest up to 15 times; see NMI_BITS.
*/
-#define nmi_enter() \
+#define __nmi_enter() \
do { \
+ lockdep_off(); \
arch_nmi_enter(); \
printk_nmi_enter(); \
- lockdep_off(); \
BUG_ON(in_nmi() == NMI_MASK); \
__preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
rcu_nmi_enter(); \
- lockdep_hardirq_enter(); \
instrumentation_begin(); \
ftrace_nmi_enter(); \
instrumentation_end(); \
} while (0)
-#define nmi_exit() \
+#define nmi_enter() \
+ do { \
+ lockdep_hardirq_enter(); \
+ __nmi_enter(); \
+ } while (0)
+
+#define __nmi_exit() \
do { \
instrumentation_begin(); \
ftrace_nmi_exit(); \
instrumentation_end(); \
- lockdep_hardirq_exit(); \
rcu_nmi_exit(); \
BUG_ON(!in_nmi()); \
__preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
- lockdep_on(); \
printk_nmi_exit(); \
arch_nmi_exit(); \
+ lockdep_on(); \
+ } while (0)
+
+#define nmi_exit() \
+ do { \
+ __nmi_exit(); \
+ lockdep_hardirq_exit(); \
} while (0)
#endif /* LINUX_HARDIRQ_H */