Message-ID: <20200821085348.369441600@infradead.org>
Date: Fri, 21 Aug 2020 10:47:42 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: linux-kernel@...r.kernel.org, mingo@...nel.org, will@...nel.org
Cc: npiggin@...il.com, elver@...gle.com, jgross@...e.com,
paulmck@...nel.org, rostedt@...dmis.org, rjw@...ysocki.net,
joel@...lfernandes.org, svens@...ux.ibm.com, tglx@...utronix.de,
peterz@...radead.org
Subject: [PATCH v2 04/11] cpuidle: Make CPUIDLE_FLAG_TLB_FLUSHED generic
This allows moving the leave_mm() call into generic code before
rcu_idle_enter(), and gets rid of more trace_*_rcuidle() users.
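For context, the pieces fit together roughly as follows after this
patch; this is an illustrative sketch drawn from the hunks below, not
extra code to apply:

/* Illustrative sketch (not part of the patch) of the resulting layout. */

/* arch/x86/include/asm/mmu.h: the architecture advertises that it
 * implements leave_mm() by defining the symbol to itself. */
void leave_mm(int cpu);
#define leave_mm leave_mm

/* include/linux/mmu_context.h: architectures that do not define
 * leave_mm fall back to a no-op stub, so generic code can call it
 * unconditionally. */
#ifndef leave_mm
static inline void leave_mm(int cpu) { }
#endif

/* drivers/cpuidle/cpuidle.c, cpuidle_enter_state(): drop the mm in
 * generic code, before the idle path enters its RCU-idle section, so
 * the tlb_flush tracepoint in switch_mm_irqs_off() no longer needs
 * the _rcuidle variant. */
if (target_state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
	leave_mm(dev->cpu);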
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Reviewed-by: Steven Rostedt (VMware) <rostedt@...dmis.org>
Tested-by: Marco Elver <elver@...gle.com>
---
arch/x86/include/asm/mmu.h | 1 +
arch/x86/mm/tlb.c | 13 ++-----------
drivers/cpuidle/cpuidle.c | 4 ++++
drivers/idle/intel_idle.c | 16 ----------------
include/linux/cpuidle.h | 13 +++++++------
include/linux/mmu_context.h | 5 +++++
6 files changed, 19 insertions(+), 33 deletions(-)
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -59,5 +59,6 @@ typedef struct {
}
void leave_mm(int cpu);
+#define leave_mm leave_mm
#endif /* _ASM_X86_MMU_H */
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -555,21 +555,12 @@ void switch_mm_irqs_off(struct mm_struct
this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
load_new_mm_cr3(next->pgd, new_asid, true);
- /*
- * NB: This gets called via leave_mm() in the idle path
- * where RCU functions differently. Tracing normally
- * uses RCU, so we need to use the _rcuidle variant.
- *
- * (There is no good reason for this. The idle code should
- * be rearranged to call this before rcu_idle_enter().)
- */
- trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+ trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
} else {
/* The new ASID is already up to date. */
load_new_mm_cr3(next->pgd, new_asid, false);
- /* See above wrt _rcuidle. */
- trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
+ trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
}
/* Make sure we write CR3 before loaded_mm. */
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/tick.h>
+#include <linux/mmu_context.h>
#include <trace/events/power.h>
#include "cpuidle.h"
@@ -230,6 +231,9 @@ int cpuidle_enter_state(struct cpuidle_d
broadcast = false;
}
+ if (target_state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
+ leave_mm(dev->cpu);
+
/* Take note of the planned idle state. */
sched_idle_set_state(target_state);
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -90,14 +90,6 @@ static unsigned int mwait_substates __in
#define CPUIDLE_FLAG_ALWAYS_ENABLE BIT(15)
/*
- * Set this flag for states where the HW flushes the TLB for us
- * and so we don't need cross-calls to keep it consistent.
- * If this flag is set, SW flushes the TLB, so even if the
- * HW doesn't do the flushing, this flag is safe to use.
- */
-#define CPUIDLE_FLAG_TLB_FLUSHED BIT(16)
-
-/*
* MWAIT takes an 8-bit "hint" in EAX "suggesting"
* the C-state (top nibble) and sub-state (bottom nibble)
* 0x00 means "MWAIT(C1)", 0x10 means "MWAIT(C2)" etc.
@@ -131,14 +123,6 @@ static __cpuidle int intel_idle(struct c
unsigned long eax = flg2MWAIT(state->flags);
unsigned long ecx = 1; /* break on interrupt flag */
bool tick;
- int cpu = smp_processor_id();
-
- /*
- * leave_mm() to avoid costly and often unnecessary wakeups
- * for flushing the user TLB's associated with the active mm.
- */
- if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
- leave_mm(cpu);
if (!static_cpu_has(X86_FEATURE_ARAT)) {
/*
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -75,12 +75,13 @@ struct cpuidle_state {
};
/* Idle State Flags */
-#define CPUIDLE_FLAG_NONE (0x00)
-#define CPUIDLE_FLAG_POLLING BIT(0) /* polling state */
-#define CPUIDLE_FLAG_COUPLED BIT(1) /* state applies to multiple cpus */
-#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */
-#define CPUIDLE_FLAG_UNUSABLE BIT(3) /* avoid using this state */
-#define CPUIDLE_FLAG_OFF BIT(4) /* disable this state by default */
+#define CPUIDLE_FLAG_NONE (0x00)
+#define CPUIDLE_FLAG_POLLING BIT(0) /* polling state */
+#define CPUIDLE_FLAG_COUPLED BIT(1) /* state applies to multiple cpus */
+#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */
+#define CPUIDLE_FLAG_UNUSABLE BIT(3) /* avoid using this state */
+#define CPUIDLE_FLAG_OFF BIT(4) /* disable this state by default */
+#define CPUIDLE_FLAG_TLB_FLUSHED BIT(5) /* idle-state flushes TLBs */
struct cpuidle_device_kobj;
struct cpuidle_state_kobj;
--- a/include/linux/mmu_context.h
+++ b/include/linux/mmu_context.h
@@ -3,10 +3,15 @@
#define _LINUX_MMU_CONTEXT_H
#include <asm/mmu_context.h>
+#include <asm/mmu.h>
/* Architectures that care about IRQ state in switch_mm can override this. */
#ifndef switch_mm_irqs_off
# define switch_mm_irqs_off switch_mm
#endif
+#ifndef leave_mm
+static inline void leave_mm(int cpu) { }
+#endif
+
#endif