Message-ID: <lsq.1538257387.627348666@decadent.org.uk>
Date: Sat, 29 Sep 2018 22:43:07 +0100
From: Ben Hutchings <ben@...adent.org.uk>
To: linux-kernel@...r.kernel.org, stable@...r.kernel.org
CC: akpm@...ux-foundation.org,
"Konrad Rzeszutek Wilk" <konrad.wilk@...cle.com>,
"Borislav Petkov" <bp@...e.de>,
"Greg Kroah-Hartman" <gregkh@...uxfoundation.org>,
"Thomas Gleixner" <tglx@...utronix.de>,
"David Woodhouse" <dwmw@...zon.co.uk>
Subject: [PATCH 3.16 034/131] x86/cpufeatures: Disentangle MSR_SPEC_CTRL
enumeration from IBRS
3.16.59-rc1 review patch. If anyone has any objections, please let me know.
------------------
From: Thomas Gleixner <tglx@...utronix.de>
commit 7eb8956a7fec3c1f0abc2a5517dada99ccc8a961 upstream.
The availability of the SPEC_CTRL MSR is enumerated by a CPUID bit on
Intel and implied by IBRS or STIBP support on AMD. That's confusing,
and if an AMD CPU does not support IBRS because the underlying problem
has been fixed, but has another valid bit in the SPEC_CTRL MSR, the
scheme falls apart.
Add a synthetic feature bit X86_FEATURE_MSR_SPEC_CTRL to denote the
availability on both Intel and AMD.
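As a sketch of that derivation (the real change is in
init_speculation_control() below), here is a minimal userspace C model;
struct cpu_model and derive_msr_spec_ctrl() are hypothetical stand-ins
for the kernel's capability machinery, not kernel APIs:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical userspace model of the per-CPU capability bits. */
struct cpu_model {
	bool intel_spec_ctrl;	/* CPUID.(EAX=7,ECX=0):EDX[26] on Intel */
	bool amd_ibrs;		/* CPUID.0x80000008:EBX[14] on AMD */
	bool amd_stibp;		/* CPUID.0x80000008:EBX[15] on AMD */
	bool msr_spec_ctrl;	/* synthetic: MSR_IA32_SPEC_CTRL exists */
};

/* Mirrors the logic this patch adds to init_speculation_control():
 * any hardware bit that implies the MSR sets the synthetic bit. */
static void derive_msr_spec_ctrl(struct cpu_model *c)
{
	if (c->intel_spec_ctrl || c->amd_ibrs || c->amd_stibp)
		c->msr_spec_ctrl = true;
}

int main(void)
{
	/* An AMD CPU enumerating only STIBP still gets the synthetic bit. */
	struct cpu_model c = { .amd_stibp = true };

	derive_msr_spec_ctrl(&c);
	printf("MSR_SPEC_CTRL implemented: %s\n",
	       c.msr_spec_ctrl ? "yes" : "no");
	return 0;
}

Running this prints "yes" for an AMD CPU that enumerates only STIBP,
which is precisely the case a per-vendor IBRS check gets wrong.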
While at it, replace the boot_cpu_has() checks with static_cpu_has()
where possible. This prevents late microcode loading from exposing
SPEC_CTRL, but late loading is already very limited as it does not
reevaluate the mitigation options and other bits and pieces. Using
static_cpu_has() is the simplest and least fragile solution.
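A hedged userspace analogy of that boot_cpu_has()/static_cpu_has()
difference: static_cpu_has() sites are patched once when alternatives
are applied, freezing the decision at that point, while boot_cpu_has()
rereads the capability bitmap on every call. The
apply_alternatives_model() snapshot below is a stand-in for the real
alternatives patching, not how the kernel implements it:

#include <stdbool.h>
#include <stdio.h>

static bool cpu_feature_bit;		/* live capability bit */
static bool static_branch_decision;	/* frozen at "patch" time */

/* Stand-in for applying alternatives: the decision is taken once. */
static void apply_alternatives_model(void)
{
	static_branch_decision = cpu_feature_bit;
}

static bool boot_cpu_has_model(void)
{
	return cpu_feature_bit;		/* rechecked on every call */
}

static bool static_cpu_has_model(void)
{
	return static_branch_decision;	/* fixed since "patch" time */
}

int main(void)
{
	apply_alternatives_model();	/* feature absent at patch time */
	cpu_feature_bit = true;		/* late microcode load sets it */

	printf("boot_cpu_has:   %d\n", boot_cpu_has_model());	/* 1 */
	printf("static_cpu_has: %d\n", static_cpu_has_model());	/* 0 */
	return 0;
}

A feature bit set afterwards (e.g. by late microcode loading) is
visible to the live check but not to the frozen one, matching the
behaviour described above.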
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Reviewed-by: Borislav Petkov <bp@...e.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@...cle.com>
Signed-off-by: David Woodhouse <dwmw@...zon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
[bwh: Backported to 3.16:
- Use the next available bit number in CPU feature word 7
- Adjust filename, context]
Signed-off-by: Ben Hutchings <ben@...adent.org.uk>
---
arch/x86/include/asm/cpufeature.h | 1 +
arch/x86/kernel/cpu/bugs.c | 18 +++++++++++-------
arch/x86/kernel/cpu/common.c | 9 +++++++--
arch/x86/kernel/cpu/intel.c | 1 +
4 files changed, 20 insertions(+), 9 deletions(-)
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -196,6 +196,7 @@
#define X86_FEATURE_IBRS (7*32+16) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_IBPB (7*32+17) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_STIBP (7*32+18) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_MSR_SPEC_CTRL (7*32+19) /* "" MSR SPEC_CTRL is implemented */
#define X86_FEATURE_RETPOLINE (7*32+29) /* "" Generic Retpoline mitigation for Spectre variant 2 */
#define X86_FEATURE_RETPOLINE_AMD (7*32+30) /* "" AMD Retpoline mitigation for Spectre variant 2 */
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -121,7 +121,7 @@ void __init check_bugs(void)
* have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
* init code as it is not enumerated and depends on the family.
*/
- if (boot_cpu_has(X86_FEATURE_IBRS))
+ if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
/* Select the proper spectre mitigation before patching alternatives */
@@ -206,7 +206,7 @@ u64 x86_spec_ctrl_get_default(void)
{
u64 msrval = x86_spec_ctrl_base;
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
return msrval;
}
@@ -216,10 +216,12 @@ void x86_spec_ctrl_set_guest(u64 guest_s
{
u64 host = x86_spec_ctrl_base;
- if (!boot_cpu_has(X86_FEATURE_IBRS))
+ /* Is MSR_SPEC_CTRL implemented ? */
+ if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
return;
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ /* Intel controls SSB in MSR_SPEC_CTRL */
+ if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
if (host != guest_spec_ctrl)
@@ -231,10 +233,12 @@ void x86_spec_ctrl_restore_host(u64 gues
{
u64 host = x86_spec_ctrl_base;
- if (!boot_cpu_has(X86_FEATURE_IBRS))
+ /* Is MSR_SPEC_CTRL implemented ? */
+ if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
return;
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ /* Intel controls SSB in MSR_SPEC_CTRL */
+ if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
if (host != guest_spec_ctrl)
@@ -668,7 +672,7 @@ int arch_prctl_spec_ctrl_get(struct task
void x86_spec_ctrl_setup_ap(void)
{
- if (boot_cpu_has(X86_FEATURE_IBRS))
+ if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);
if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -694,19 +694,24 @@ static void init_speculation_control(str
if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
set_cpu_cap(c, X86_FEATURE_IBRS);
set_cpu_cap(c, X86_FEATURE_IBPB);
+ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
}
if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
set_cpu_cap(c, X86_FEATURE_STIBP);
- if (cpu_has(c, X86_FEATURE_AMD_IBRS))
+ if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
set_cpu_cap(c, X86_FEATURE_IBRS);
+ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+ }
if (cpu_has(c, X86_FEATURE_AMD_IBPB))
set_cpu_cap(c, X86_FEATURE_IBPB);
- if (cpu_has(c, X86_FEATURE_AMD_STIBP))
+ if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
set_cpu_cap(c, X86_FEATURE_STIBP);
+ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+ }
}
void get_cpu_cap(struct cpuinfo_x86 *c)
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -119,6 +119,7 @@ static void early_init_intel(struct cpui
setup_clear_cpu_cap(X86_FEATURE_IBPB);
setup_clear_cpu_cap(X86_FEATURE_STIBP);
setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
+ setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
setup_clear_cpu_cap(X86_FEATURE_SSBD);
}