Message-Id: <20180723122418.000920675@linuxfoundation.org>
Date: Mon, 23 Jul 2018 14:42:24 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Tom Lendacky <thomas.lendacky@....com>,
Thomas Gleixner <tglx@...utronix.de>,
Borislav Petkov <bp@...e.de>,
David Woodhouse <dwmw@...zon.co.uk>,
"Srivatsa S. Bhat" <srivatsa@...il.mit.edu>,
"Matt Helsley (VMware)" <matt.helsley@...il.com>,
Alexey Makhalov <amakhalov@...are.com>,
Bo Gan <ganb@...are.com>
Subject: [PATCH 4.4 090/107] x86/speculation: Add virtualized speculative store bypass disable support

4.4-stable review patch. If anyone has any objections, please let me know.

------------------

From: Tom Lendacky <thomas.lendacky@....com>

commit 11fb0683493b2da112cd64c9dada221b52463bf7 upstream

Some AMD processors only support a non-architectural means of enabling
speculative store bypass disable (SSBD). To present a simplified view of
this to a guest, an architectural definition has been created through a new
CPUID bit, 0x80000008_EBX[25], and a new MSR, 0xc001011f. With this, a
hypervisor can virtualize the existence of this definition and provide a
guest with an architectural method for using SSBD.

Add the new CPUID feature, the new MSR and update the existing SSBD
support to use this MSR when present.
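
As an illustration (not part of the patch itself), a minimal user-space
sketch that checks whether the new CPUID bit is advertised, using the
compiler-provided <cpuid.h> helper:

  #include <cpuid.h>
  #include <stdio.h>

  int main(void)
  {
          unsigned int eax, ebx, ecx, edx;

          /* Extended leaf 0x80000008: AMD speculation-control feature
           * bits are reported in EBX. */
          if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
                  return 1;

          /* Bit 25 advertises the virtualized SSBD interface. */
          printf("VIRT_SSBD %s\n",
                 (ebx & (1u << 25)) ? "advertised" : "not advertised");
          return 0;
  }
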
Signed-off-by: Tom Lendacky <thomas.lendacky@....com>
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Reviewed-by: Borislav Petkov <bp@...e.de>
Signed-off-by: David Woodhouse <dwmw@...zon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
Signed-off-by: Srivatsa S. Bhat <srivatsa@...il.mit.edu>
Reviewed-by: Matt Helsley (VMware) <matt.helsley@...il.com>
Reviewed-by: Alexey Makhalov <amakhalov@...are.com>
Reviewed-by: Bo Gan <ganb@...are.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>

---

 arch/x86/include/asm/cpufeatures.h |    1 +
 arch/x86/include/asm/msr-index.h   |    2 ++
 arch/x86/kernel/cpu/bugs.c         |    4 +++-
 arch/x86/kernel/process.c          |   13 ++++++++++++-
 4 files changed, 18 insertions(+), 2 deletions(-)
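
For context, a small sketch (again, not part of the patch) that reads the
new MSR from user space via the msr driver; it assumes root privileges,
that the msr module is loaded (modprobe msr), and that the hypervisor
actually implements the MSR, since reading an unimplemented MSR fails:

  #include <fcntl.h>
  #include <inttypes.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
          uint64_t val;
          /* Needs root and the msr driver (modprobe msr). */
          int fd = open("/dev/cpu/0/msr", O_RDONLY);

          if (fd < 0)
                  return 1;

          /* For /dev/cpu/N/msr, the pread offset selects the MSR number;
           * 0xc001011f is MSR_AMD64_VIRT_SPEC_CTRL from this patch. */
          if (pread(fd, &val, sizeof(val), 0xc001011fUL) != 8)
                  return 1;

          /* SSBD occupies bit 2, the same position as in SPEC_CTRL. */
          printf("VIRT_SPEC_CTRL = 0x%" PRIx64 " (SSBD %s)\n",
                 val, (val & 4) ? "on" : "off");
          close(fd);
          return 0;
  }
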
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -269,6 +269,7 @@
#define X86_FEATURE_AMD_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_AMD_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */

/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -328,6 +328,8 @@
#define MSR_AMD64_IBSOPDATA4 0xc001103d
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */

+#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
+
/* Fam 16h MSRs */
#define MSR_F16H_L2I_PERF_CTL 0xc0010230
#define MSR_F16H_L2I_PERF_CTR 0xc0010231
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -203,7 +203,9 @@ static void x86_amd_ssb_disable(void)
{
u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

- if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+ if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
+ wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
+ else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -308,6 +308,15 @@ static __always_inline void amd_set_core
}
#endif

+static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
+{
+ /*
+ * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
+ * so ssbd_tif_to_spec_ctrl() just works.
+ */
+ wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
+}
+
static __always_inline void intel_set_ssb_state(unsigned long tifn)
{
u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
@@ -317,7 +326,9 @@ static __always_inline void intel_set_ss

static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
{
- if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+ if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
+ amd_set_ssb_virt_state(tifn);
+ else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
amd_set_core_ssb_state(tifn);
else
intel_set_ssb_state(tifn);
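
For reference, the helper named in the comment above works because the
thread-info flag can be shifted directly into the SPEC_CTRL_SSBD bit
position, yielding a value valid for both SPEC_CTRL and VIRT_SPEC_CTRL.
A simplified sketch, with the constants assumed to match the 4.4-era
asm/spec-ctrl.h and asm/thread_info.h:

  /* Sketch only; constant values assumed from the 4.4-era headers. */
  #define SPEC_CTRL_SSBD_SHIFT  2               /* SSBD is bit 2 of SPEC_CTRL */
  #define TIF_SSBD              5               /* thread-info flag bit */
  #define _TIF_SSBD             (1UL << TIF_SSBD)

  static inline unsigned long ssbd_tif_to_spec_ctrl(unsigned long tifn)
  {
          /* Move the TIF_SSBD bit down into the SPEC_CTRL_SSBD position. */
          return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
  }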