Message-ID: <alpine.DEB.2.21.1810022000000.1435@nanos.tec.linutronix.de>
Date: Tue, 2 Oct 2018 21:02:59 +0200 (CEST)
From: Thomas Gleixner <tglx@...utronix.de>
To: Tim Chen <tim.c.chen@...ux.intel.com>
cc: Jiri Kosina <jikos@...nel.org>,
Thomas Lendacky <Thomas.Lendacky@....com>,
Tom Lendacky <thomas.lendacky@....com>,
Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Josh Poimboeuf <jpoimboe@...hat.com>,
Andrea Arcangeli <aarcange@...hat.com>,
David Woodhouse <dwmw@...zon.co.uk>,
Andi Kleen <ak@...ux.intel.com>,
Dave Hansen <dave.hansen@...el.com>,
Casey Schaufler <casey.schaufler@...el.com>,
Asit Mallick <asit.k.mallick@...el.com>,
Arjan van de Ven <arjan@...ux.intel.com>,
Jon Masters <jcm@...hat.com>, linux-kernel@...r.kernel.org,
x86@...nel.org
Subject: Re: [Patch v2 3/4] x86/speculation: Extend per process STIBP to AMD cpus.

On Tue, 25 Sep 2018, Tim Chen wrote:
> From: Thomas Lendacky <Thomas.Lendacky@....com>
>
> We extend the app-to-app Spectre v2 mitigation using STIBP
> to AMD CPUs. We need to take care of special
> cases for the AMD CPUs' update of the SPEC_CTRL MSR to avoid double
> writing of MSRs from updates to SSBD and STIBP.
According to the documentation, changelogs want to be written in imperative
mood.
> Originally-by: Thomas Lendacky <Thomas.Lendacky@....com>
> Signed-off-by: Tim Chen <tim.c.chen@...ux.intel.com>
> ---
> arch/x86/kernel/process.c | 48 +++++++++++++++++++++++++++++++++++++----------
> 1 file changed, 38 insertions(+), 10 deletions(-)
>
> diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
> index cb24014..4a3a672 100644
> --- a/arch/x86/kernel/process.c
> +++ b/arch/x86/kernel/process.c
> @@ -399,6 +399,10 @@ static __always_inline void set_spec_ctrl_state(unsigned long tifn)
> {
> 	u64 msr = x86_spec_ctrl_base;
> 
> +	/*
> +	 * AMD cpu may have used a different method to update SSBD, so
> +	 * we need to be sure we are using the SPEC_CTRL MSR for SSBD.
This has nothing to do with AMD. If X86_FEATURE_SSBD is not set, the SSBD
bit is not to be touched.
> +	 */
> 	if (static_cpu_has(X86_FEATURE_SSBD))
> 		msr |= ssbd_tif_to_spec_ctrl(tifn);
>
> @@ -408,20 +412,45 @@ static __always_inline void set_spec_ctrl_state(unsigned long tifn)
> 	wrmsrl(MSR_IA32_SPEC_CTRL, msr);
> }
>
> -static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
> +static __always_inline void __speculative_store_bypass_update(unsigned long tifp,
> +						 unsigned long tifn)
> {
> -	if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
> -		amd_set_ssb_virt_state(tifn);
> -	else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
> -		amd_set_core_ssb_state(tifn);
> -	else
> -		set_spec_ctrl_state(tifn);
> +	bool stibp = !!((tifp ^ tifn) & _TIF_STIBP);
> +	bool ssbd = !!((tifp ^ tifn) & _TIF_SSBD);
> +
> +	if (!ssbd && !stibp)
> +		return;
> +
> +	if (ssbd) {
> +		/*
> +		 * For AMD, try these methods first. The ssbd variable will
> +		 * reflect if the SPEC_CTRL MSR method is needed.
> +		 */
> +		ssbd = false;
> +
> +		if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
> +			amd_set_ssb_virt_state(tifn);
> +		else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
> +			amd_set_core_ssb_state(tifn);
> +		else
> +			ssbd = true;
> +	}
> +
> +	/* Avoid a possible extra MSR write, recheck the flags */
> +	if (!ssbd && !stibp)
> +		return;
> +
> +	set_spec_ctrl_state(tifn);
Uuurgh. This is context switch code and it results in a horrible assembly
maze. Also the function name is bogus now. It's not only dealing with SSB
anymore. Please stop gluing stuff into the code as you see fit.
Something like the below creates halfway sensible code.
static __always_inline void spec_ctrl_update_msr(unsigned long tifn)
{
	u64 msr = x86_spec_ctrl_base;

	if (static_cpu_has(X86_FEATURE_SSBD))
		msr |= ssbd_tif_to_spec_ctrl(tifn);

	wrmsrl(MSR_IA32_SPEC_CTRL, msr);
}

static __always_inline void spec_ctrl_update(unsigned long tifp,
					     unsigned long tifn)
{
	bool updmsr = !!((tifp ^ tifn) & _TIF_STIBP);

	if ((tifp ^ tifn) & _TIF_SSBD) {
		if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
			amd_set_ssb_virt_state(tifn);
		else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
			amd_set_core_ssb_state(tifn);
		else if (static_cpu_has(X86_FEATURE_SSBD))
			updmsr = true;
	}

	if (updmsr)
		spec_ctrl_update_msr(tifn);
}

void speculation_ctrl_update(unsigned long tif)
{
	preempt_disable();
	spec_ctrl_update(~tif, tif);
	preempt_enable();
}
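
For illustration only: the (tifp ^ tifn) test fires only for flags which
actually change across the switch, and speculation_ctrl_update() forces an
update for every set bit by handing in ~tif as the "previous" word. A
minimal stand-alone sketch of that pattern, using made-up TIF_*_DEMO flag
values instead of the real _TIF_SSBD/_TIF_STIBP definitions, could look
roughly like this:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in flag bits; the real _TIF_* values live in asm/thread_info.h. */
#define TIF_SSBD_DEMO	(1UL << 0)
#define TIF_STIBP_DEMO	(1UL << 1)

/* React only to flags that differ between the previous and next task. */
static void demo_update(unsigned long tifp, unsigned long tifn)
{
	bool updmsr = !!((tifp ^ tifn) & TIF_STIBP_DEMO);

	if ((tifp ^ tifn) & TIF_SSBD_DEMO)
		updmsr = true;	/* SPEC_CTRL MSR path in the real code */

	printf("prev=%lx next=%lx -> %s\n", tifp, tifn,
	       updmsr ? "write MSR" : "skip write");
}

int main(void)
{
	unsigned long tif = TIF_SSBD_DEMO;

	demo_update(0, TIF_STIBP_DEMO);	/* STIBP toggled: write */
	demo_update(tif, tif);		/* nothing changed: skip */
	demo_update(~tif, tif);		/* ~tif makes every set bit look changed */
	return 0;
}

The last call reports an MSR write even though the flags did not change,
which is exactly what passing ~tif is meant to achieve.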
Hmm?
Thanks,
tglx