Message-ID: <lsq.1538257387.261794817@decadent.org.uk>
Date: Sat, 29 Sep 2018 22:43:07 +0100
From: Ben Hutchings <ben@...adent.org.uk>
To: linux-kernel@...r.kernel.org, stable@...r.kernel.org
CC: akpm@...ux-foundation.org, "David Woodhouse" <dwmw@...zon.co.uk>,
"Greg Kroah-Hartman" <gregkh@...uxfoundation.org>,
"Thomas Gleixner" <tglx@...utronix.de>,
"Tom Lendacky" <thomas.lendacky@....com>,
"Borislav Petkov" <bp@...e.de>
Subject: [PATCH 3.16 039/131] x86/speculation: Add virtualized speculative store bypass disable support

3.16.59-rc1 review patch. If anyone has any objections, please let me know.
------------------
From: Tom Lendacky <thomas.lendacky@....com>
commit 11fb0683493b2da112cd64c9dada221b52463bf7 upstream.
Some AMD processors only support a non-architectural means of enabling
speculative store bypass disable (SSBD). To allow a simplified view of
this to a guest, an architectural definition has been created through a new
CPUID bit, 0x80000008_EBX[25], and a new MSR, 0xc001011f. With this, a
hypervisor can virtualize the existence of this definition and provide an
architectural method for using SSBD to a guest.
Add the new CPUID feature, the new MSR and update the existing SSBD
support to use this MSR when present.
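
[Editor's note: as an aside for reviewers, below is a minimal, illustrative
guest-side sketch of the interface described above. It is not part of this
patch; the helper names (cpuid_ebx, wrmsr64, guest_enable_ssbd) are
hypothetical, the CPUID/MSR constants are taken from the patch, and the SSBD
bit value is the same bit 2 used in IA32_SPEC_CTRL. The kernel code in the
diff uses the cpufeature and MSR helpers instead of raw CPUID/WRMSR.]

/*
 * Illustrative only (not part of this patch): how ring-0 guest code
 * could detect and use the virtualized SSBD interface.
 */
#include <stdint.h>

#define CPUID_LEAF_EXT_FEATURES		0x80000008
#define CPUID_EBX_VIRT_SSBD		(1u << 25)	/* 0x80000008_EBX[25] */
#define MSR_AMD64_VIRT_SPEC_CTRL	0xc001011f
#define SPEC_CTRL_SSBD			(1ull << 2)	/* same bit as in IA32_SPEC_CTRL */

static inline uint32_t cpuid_ebx(uint32_t leaf)
{
	uint32_t eax, ebx, ecx, edx;

	asm volatile("cpuid"
		     : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
		     : "a" (leaf), "c" (0));
	return ebx;
}

static inline void wrmsr64(uint32_t msr, uint64_t val)
{
	asm volatile("wrmsr"
		     : : "c" (msr), "a" ((uint32_t)val), "d" ((uint32_t)(val >> 32)));
}

static void guest_enable_ssbd(void)
{
	/* If the hypervisor advertises VIRT_SSBD, use the architectural MSR. */
	if (cpuid_ebx(CPUID_LEAF_EXT_FEATURES) & CPUID_EBX_VIRT_SSBD)
		wrmsr64(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
}
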
Signed-off-by: Tom Lendacky <thomas.lendacky@....com>
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Reviewed-by: Borislav Petkov <bp@...e.de>
Signed-off-by: David Woodhouse <dwmw@...zon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
[bwh: Backported to 3.16:
- This CPUID word is feature word 11
- Adjust filenames, context]
Signed-off-by: Ben Hutchings <ben@...adent.org.uk>
---
arch/x86/include/asm/cpufeature.h | 1 +
arch/x86/include/uapi/asm/msr-index.h | 2 ++
arch/x86/kernel/cpu/bugs.c | 4 +++-
arch/x86/kernel/process.c | 13 ++++++++++++-
4 files changed, 18 insertions(+), 2 deletions(-)
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -255,6 +255,7 @@
#define X86_FEATURE_AMD_IBPB (11*32+12) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_AMD_IBRS (11*32+14) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_AMD_STIBP (11*32+15) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_VIRT_SSBD (11*32+25) /* Virtualized Speculative Store Bypass Disable */
/*
* BUG word(s)
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -225,6 +225,8 @@
#define MSR_AMD64_IBSBRTARGET 0xc001103b
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
+#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
+
/* Fam 16h MSRs */
#define MSR_F16H_L2I_PERF_CTL 0xc0010230
#define MSR_F16H_L2I_PERF_CTR 0xc0010231
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -266,7 +266,9 @@ static void x86_amd_ssb_disable(void)
{
u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
- if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+ if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
+ wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
+ else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
wrmsrl(MSR_AMD64_LS_CFG, msrval);
}
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -326,6 +326,15 @@ static __always_inline void amd_set_core
}
#endif
+static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
+{
+ /*
+ * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
+ * so ssbd_tif_to_spec_ctrl() just works.
+ */
+ wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
+}
+
static __always_inline void intel_set_ssb_state(unsigned long tifn)
{
u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
@@ -335,7 +344,9 @@ static __always_inline void intel_set_ss
static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
{
- if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+ if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
+ amd_set_ssb_virt_state(tifn);
+ else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
amd_set_core_ssb_state(tifn);
else
intel_set_ssb_state(tifn);
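
[Editor's note: for context, the commit message says a hypervisor can
virtualize this definition. The following is a rough, hypothetical host-side
sketch, not this patch and not KVM's actual implementation: advertise
CPUID 0x80000008_EBX[25], intercept guest writes to MSR 0xc001011f, and map
the architectural SSBD bit onto the host mechanism, e.g. the
non-architectural LS_CFG MSR used by x86_amd_ssb_disable() above. The
variables host_ls_cfg_base/host_ls_cfg_ssbd_mask and the handler name are
invented for illustration.]

#include <stdint.h>

#define MSR_AMD64_VIRT_SPEC_CTRL	0xc001011f
#define MSR_AMD64_LS_CFG		0xc0011020
#define SPEC_CTRL_SSBD			(1ull << 2)

/* Host-specific values, analogous to x86_amd_ls_cfg_base/_ssbd_mask. */
static uint64_t host_ls_cfg_base;
static uint64_t host_ls_cfg_ssbd_mask;

static inline void wrmsr64(uint32_t msr, uint64_t val)
{
	asm volatile("wrmsr"
		     : : "c" (msr), "a" ((uint32_t)val), "d" ((uint32_t)(val >> 32)));
}

/* Hypothetical intercept for a guest write to MSR_AMD64_VIRT_SPEC_CTRL. */
static void handle_virt_spec_ctrl_write(uint64_t guest_val)
{
	uint64_t msrval = host_ls_cfg_base;

	/* Translate the architectural SSBD bit to the host's LS_CFG bit. */
	if (guest_val & SPEC_CTRL_SSBD)
		msrval |= host_ls_cfg_ssbd_mask;

	wrmsr64(MSR_AMD64_LS_CFG, msrval);
}
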