Message-Id: <705b51cba5b5e7805aeb08af7f7d21e6ec897a17.1537920575.git.tim.c.chen@linux.intel.com>
Date: Tue, 25 Sep 2018 17:43:58 -0700
From: Tim Chen <tim.c.chen@...ux.intel.com>
To: Jiri Kosina <jikos@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>
Cc: Thomas Lendacky <Thomas.Lendacky@....com>,
Tom Lendacky <thomas.lendacky@....com>,
Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Josh Poimboeuf <jpoimboe@...hat.com>,
Andrea Arcangeli <aarcange@...hat.com>,
David Woodhouse <dwmw@...zon.co.uk>,
Andi Kleen <ak@...ux.intel.com>,
Dave Hansen <dave.hansen@...el.com>,
Casey Schaufler <casey.schaufler@...el.com>,
Asit Mallick <asit.k.mallick@...el.com>,
Arjan van de Ven <arjan@...ux.intel.com>,
Jon Masters <jcm@...hat.com>, linux-kernel@...r.kernel.org,
x86@...nel.org, Tim Chen <tim.c.chen@...ux.intel.com>
Subject: [Patch v2 3/4] x86/speculation: Extend per process STIBP to AMD cpus.
From: Thomas Lendacky <Thomas.Lendacky@....com>

Extend the app-to-app Spectre v2 mitigation using STIBP to AMD CPUs.
AMD CPUs may update SSBD through a mechanism other than the SPEC_CTRL
MSR, so special care is needed to avoid writing the SPEC_CTRL MSR
twice when both the SSBD and STIBP states change.
Originally-by: Thomas Lendacky <Thomas.Lendacky@....com>
Signed-off-by: Tim Chen <tim.c.chen@...ux.intel.com>
---
arch/x86/kernel/process.c | 48 +++++++++++++++++++++++++++++++++++++----------
1 file changed, 38 insertions(+), 10 deletions(-)
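
For context, the rework below keys off which TIF bits actually changed
between the previous and the next task, computed as an XOR of the two
flag words. A minimal userspace sketch of that changed-bits test,
using made-up bit values rather than the kernel's _TIF_* definitions:

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-ins for the kernel's _TIF_SSBD / _TIF_STIBP bits (illustrative only). */
	#define TIF_SSBD_BIT	(1UL << 0)
	#define TIF_STIBP_BIT	(1UL << 1)

	int main(void)
	{
		unsigned long tifp = TIF_SSBD_BIT;                 /* prev task: SSBD set   */
		unsigned long tifn = TIF_SSBD_BIT | TIF_STIBP_BIT; /* next task: SSBD+STIBP */

		/* XOR keeps only the bits that differ between prev and next. */
		bool ssbd  = !!((tifp ^ tifn) & TIF_SSBD_BIT);
		bool stibp = !!((tifp ^ tifn) & TIF_STIBP_BIT);

		/* SSBD is unchanged here, so only a STIBP update would be needed. */
		printf("ssbd changed: %d, stibp changed: %d\n", ssbd, stibp);
		return 0;
	}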
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index cb24014..4a3a672 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -399,6 +399,10 @@ static __always_inline void set_spec_ctrl_state(unsigned long tifn)
 {
 	u64 msr = x86_spec_ctrl_base;
 
+	/*
+	 * An AMD CPU may have used a different method to update SSBD, so
+	 * we need to be sure we are using the SPEC_CTRL MSR for SSBD.
+	 */
 	if (static_cpu_has(X86_FEATURE_SSBD))
 		msr |= ssbd_tif_to_spec_ctrl(tifn);
 
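On ssbd_tif_to_spec_ctrl() above: it relocates the task-flag SSBD bit
to its position in the SPEC_CTRL MSR with a mask and a shift. A sketch
of that idiom, with illustrative bit positions rather than the
kernel's actual values:

	#include <stdio.h>

	#define TIF_SSBD		5	/* illustrative bit position in the thread flags */
	#define SPEC_CTRL_SSBD_SHIFT	2	/* illustrative bit position in the MSR */

	/* Mask out the TIF bit, then shift it down to the MSR bit position. */
	static unsigned long tif_to_msr_bit(unsigned long tifn)
	{
		return (tifn & (1UL << TIF_SSBD)) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
	}

	int main(void)
	{
		printf("%#lx\n", tif_to_msr_bit(1UL << TIF_SSBD));	/* prints 0x4 */
		return 0;
	}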
@@ -408,20 +412,45 @@ static __always_inline void set_spec_ctrl_state(unsigned long tifn)
 	wrmsrl(MSR_IA32_SPEC_CTRL, msr);
 }
 
-static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+static __always_inline void __speculative_store_bypass_update(unsigned long tifp,
+							       unsigned long tifn)
 {
-	if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
-		amd_set_ssb_virt_state(tifn);
-	else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
-		amd_set_core_ssb_state(tifn);
-	else
-		set_spec_ctrl_state(tifn);
+	bool stibp = !!((tifp ^ tifn) & _TIF_STIBP);
+	bool ssbd = !!((tifp ^ tifn) & _TIF_SSBD);
+
+	if (!ssbd && !stibp)
+		return;
+
+	if (ssbd) {
+		/*
+		 * For AMD, try these methods first. The ssbd variable will
+		 * reflect if the SPEC_CTRL MSR method is needed.
+		 */
+		ssbd = false;
+
+		if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
+			amd_set_ssb_virt_state(tifn);
+		else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+			amd_set_core_ssb_state(tifn);
+		else
+			ssbd = true;
+	}
+
+	/* Avoid a possible extra MSR write, recheck the flags */
+	if (!ssbd && !stibp)
+		return;
+
+	set_spec_ctrl_state(tifn);
 }
 
 void speculative_store_bypass_update(unsigned long tif)
 {
+	/*
+	 * On this path we're forcing the update, so use ~tif as the
+	 * previous flags.
+	 */
 	preempt_disable();
-	__speculative_store_bypass_update(tif);
+	__speculative_store_bypass_update(~tif, tif);
 	preempt_enable();
 }
 
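A note on the ~tif trick above: since ~tif differs from tif in every
bit, the XOR-based change test in __speculative_store_bypass_update()
sees both _TIF_SSBD and _TIF_STIBP as changed, so this path always
performs a full update. A tiny check, again with made-up bit values:

	#include <stdbool.h>
	#include <stdio.h>

	#define TIF_SSBD_BIT	(1UL << 0)	/* illustrative, not the kernel's value */
	#define TIF_STIBP_BIT	(1UL << 1)

	int main(void)
	{
		unsigned long tif = TIF_STIBP_BIT;

		/* ~tif flips every bit, so (~tif ^ tif) is all ones... */
		bool ssbd  = !!((~tif ^ tif) & TIF_SSBD_BIT);
		bool stibp = !!((~tif ^ tif) & TIF_STIBP_BIT);

		/* ...and both flags always test as changed, forcing the update. */
		printf("ssbd: %d, stibp: %d\n", ssbd, stibp);	/* prints 1, 1 */
		return 0;
	}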
@@ -457,8 +486,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 	if ((tifp ^ tifn) & _TIF_NOCPUID)
 		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
 
-	if ((tifp ^ tifn) & (_TIF_SSBD | _TIF_STIBP))
-		__speculative_store_bypass_update(tifn);
+	__speculative_store_bypass_update(tifp, tifn);
 }
 
 /*
--
2.9.4