Message-ID: <20241108122909.763663-2-patryk.wlazlyn@linux.intel.com>
Date: Fri, 8 Nov 2024 13:29:07 +0100
From: Patryk Wlazlyn <patryk.wlazlyn@...ux.intel.com>
To: x86@...nel.org
Cc: linux-kernel@...r.kernel.org,
	linux-pm@...r.kernel.org,
	rafael.j.wysocki@...el.com,
	len.brown@...el.com,
	artem.bityutskiy@...ux.intel.com,
	dave.hansen@...ux.intel.com,
	patryk.wlazlyn@...ux.intel.com
Subject: [PATCH v3 1/3] x86/smp: Allow calling mwait_play_dead with arbitrary hint

The current implementation of the MWAIT hint lookup for the deepest
C-state assumes that the sub-state hints are contiguous in the range
[0, NUM_SUBSTATES-1]. While that holds on most Intel x86 platforms,
it is not architecturally documented, and on some of them the lookup
may not resolve to the deepest available idle state.

For example, Intel's Sierra Forest reports two C6 substates in CPUID
leaf 5:

    C6S  (hint 0x22)
    C6SP (hint 0x23)

Hints 0x20 and 0x21 are skipped entirely, so the current implementation
computes the wrong hint when looking up the deepest C-state for an
offlined CPU to enter. As a result, a package containing an offlined
CPU can never reach PC6.
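
For reference, the existing lookup in mwait_play_dead() walks the
per-C-state sub-state counts in CPUID leaf 5 EDX and assumes the
deepest hint is count - 1 within the highest populated field
(simplified sketch of the current smpboot.c code; the MWAIT_SUBSTATE_*
constants come from asm/mwait.h):

	edx >>= MWAIT_SUBSTATE_SIZE;	/* skip the C0 field */
	for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
		if (edx & MWAIT_SUBSTATE_MASK) {
			highest_cstate = i;
			highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
		}
	}
	/* Assumes sub-state hints are 0 .. count-1, i.e. contiguous: */
	eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
	      (highest_subcstate - 1);

With the two C6 substates enumerated on Sierra Forest this comes out
as (2 << 4) | (2 - 1) == 0x21, one of the skipped hints, instead of
the deepest supported hint 0x23 (C6SP).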

Allow the idle driver to call the mwait_play_dead() code with an
explicit MWAIT hint, skipping the CPUID-based computation.
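
With this in place, a driver that knows the exact hint for the deepest
C-state (e.g. from its own C-state table) could enter it along these
lines. The function below is a hypothetical caller shown for
illustration only; play_dead_common() and hlt_play_dead() are the
existing smpboot.c helpers:

	/* Hypothetical caller, e.g. an idle driver's enter_dead hook. */
	void idle_driver_play_dead(unsigned long hint)
	{
		play_dead_common();

		/*
		 * Parks the CPU and does not return; only the
		 * !CONFIG_SMP stub returns (with 1).
		 */
		if (mwait_play_dead_with_hint(hint))
			hlt_play_dead();	/* fall back to HLT */
	}
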
Signed-off-by: Patryk Wlazlyn <patryk.wlazlyn@...ux.intel.com>
---
 arch/x86/include/asm/smp.h |  6 ++++++
 arch/x86/kernel/smpboot.c  | 25 ++++++++++++++++++-------
 2 files changed, 24 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index ca073f40698f..fbd275d6661a 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -114,6 +114,7 @@ void wbinvd_on_cpu(int cpu);
 int wbinvd_on_all_cpus(void);
 
 void smp_kick_mwait_play_dead(void);
+int mwait_play_dead_with_hint(unsigned long hint);
 
 void native_smp_send_reschedule(int cpu);
 void native_send_call_func_ipi(const struct cpumask *mask);
@@ -164,6 +165,11 @@ static inline struct cpumask *cpu_llc_shared_mask(int cpu)
 {
 	return (struct cpumask *)cpumask_of(0);
 }
+
+static inline int mwait_play_dead_with_hint(unsigned long eax_hint)
+{
+	return 1;
+}
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_DEBUG_NMI_SELFTEST
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 0c35207320cb..44c40781bad6 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1270,13 +1270,14 @@ void play_dead_common(void)
 	local_irq_disable();
 }
 
+int mwait_play_dead_with_hint(unsigned long eax_hint);
+
 /*
  * We need to flush the caches before going to sleep, lest we have
  * dirty data in our caches when we come back up.
  */
-static inline void mwait_play_dead(void)
+static inline int mwait_play_dead(void)
 {
-	struct mwait_cpu_dead *md = this_cpu_ptr(&mwait_cpu_dead);
 	unsigned int eax, ebx, ecx, edx;
 	unsigned int highest_cstate = 0;
 	unsigned int highest_subcstate = 0;
@@ -1284,13 +1285,13 @@ static inline void mwait_play_dead(void)
 
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
 	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
-		return;
+		return 1;
 	if (!this_cpu_has(X86_FEATURE_MWAIT))
-		return;
+		return 1;
 	if (!this_cpu_has(X86_FEATURE_CLFLUSH))
-		return;
+		return 1;
 	if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
-		return;
+		return 1;
 
 	eax = CPUID_MWAIT_LEAF;
 	ecx = 0;
@@ -1314,6 +1315,13 @@ static inline void mwait_play_dead(void)
 			(highest_subcstate - 1);
 	}
 
+	return mwait_play_dead_with_hint(eax);
+}
+
+int mwait_play_dead_with_hint(unsigned long eax_hint)
+{
+	struct mwait_cpu_dead *md = this_cpu_ptr(&mwait_cpu_dead);
+
 	/* Set up state for the kexec() hack below */
 	md->status = CPUDEAD_MWAIT_WAIT;
 	md->control = CPUDEAD_MWAIT_WAIT;
@@ -1333,7 +1341,7 @@ static inline void mwait_play_dead(void)
 		mb();
 		__monitor(md, 0, 0);
 		mb();
-		__mwait(eax, 0);
+		__mwait(eax_hint, 0);
 
 		if (READ_ONCE(md->control) == CPUDEAD_MWAIT_KEXEC_HLT) {
 			/*
@@ -1353,6 +1361,9 @@ static inline void mwait_play_dead(void)
 				native_halt();
 		}
 	}
+
+	/* Never reached */
+	return 0;
 }
 
 /*
--
2.47.0