[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <769e4725-754b-9185-7d91-c0f4dfb06b09@amd.com>
Date: Wed, 26 Sep 2018 19:11:03 +0000
From: "Lendacky, Thomas" <Thomas.Lendacky@....com>
To: Tim Chen <tim.c.chen@...ux.intel.com>,
Jiri Kosina <jikos@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>
CC: Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Josh Poimboeuf <jpoimboe@...hat.com>,
Andrea Arcangeli <aarcange@...hat.com>,
David Woodhouse <dwmw@...zon.co.uk>,
Andi Kleen <ak@...ux.intel.com>,
Dave Hansen <dave.hansen@...el.com>,
Casey Schaufler <casey.schaufler@...el.com>,
Asit Mallick <asit.k.mallick@...el.com>,
Arjan van de Ven <arjan@...ux.intel.com>,
Jon Masters <jcm@...hat.com>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"x86@...nel.org" <x86@...nel.org>
Subject: Re: [Patch v2 3/4] x86/speculation: Extend per process STIBP to AMD
cpus.
On 09/26/2018 12:24 PM, Tim Chen wrote:
> On 09/25/2018 05:43 PM, Tim Chen wrote:
>> From: Thomas Lendacky <Thomas.Lendacky@....com>
>>
>> We extend the app-to-app Spectre v2 mitigation using STIBP
>> to AMD CPUs. We need to take care of special
>> cases for the AMD CPUs' update of the SPEC_CTRL MSR to avoid double
>> writing of MSRs from updates to SSBD and STIBP.
>
> Tom, if this patch looks okay to you, can I add your sign off?
Hi Tim,
Yup, that looks correct.
>
> Tim
>
>>
>> Originally-by: Thomas Lendacky <Thomas.Lendacky@....com>
>> Signed-off-by: Tim Chen <tim.c.chen@...ux.intel.com>
Signed-off-by: Tom Lendacky <thomas.lendacky@....com>
Thanks,
Tom
>> ---
>> arch/x86/kernel/process.c | 48 +++++++++++++++++++++++++++++++++++++----------
>> 1 file changed, 38 insertions(+), 10 deletions(-)
>>
>> diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
>> index cb24014..4a3a672 100644
>> --- a/arch/x86/kernel/process.c
>> +++ b/arch/x86/kernel/process.c
>> @@ -399,6 +399,10 @@ static __always_inline void set_spec_ctrl_state(unsigned long tifn)
>> {
>> u64 msr = x86_spec_ctrl_base;
>>
>> + /*
>> + * AMD cpu may have used a different method to update SSBD, so
>> + * we need to be sure we are using the SPEC_CTRL MSR for SSBD.
>> + */
>> if (static_cpu_has(X86_FEATURE_SSBD))
>> msr |= ssbd_tif_to_spec_ctrl(tifn);
>>
>> @@ -408,20 +412,45 @@ static __always_inline void set_spec_ctrl_state(unsigned long tifn)
>> wrmsrl(MSR_IA32_SPEC_CTRL, msr);
>> }
>>
>> -static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
>> +static __always_inline void __speculative_store_bypass_update(unsigned long tifp,
>> + unsigned long tifn)
>> {
>> - if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
>> - amd_set_ssb_virt_state(tifn);
>> - else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
>> - amd_set_core_ssb_state(tifn);
>> - else
>> - set_spec_ctrl_state(tifn);
>> + bool stibp = !!((tifp ^ tifn) & _TIF_STIBP);
>> + bool ssbd = !!((tifp ^ tifn) & _TIF_SSBD);
>> +
>> + if (!ssbd && !stibp)
>> + return;
>> +
>> + if (ssbd) {
>> + /*
>> + * For AMD, try these methods first. The ssbd variable will
>> + * reflect if the SPEC_CTRL MSR method is needed.
>> + */
>> + ssbd = false;
>> +
>> + if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
>> + amd_set_ssb_virt_state(tifn);
>> + else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
>> + amd_set_core_ssb_state(tifn);
>> + else
>> + ssbd = true;
>> + }
>> +
>> + /* Avoid a possible extra MSR write, recheck the flags */
>> + if (!ssbd && !stibp)
>> + return;
>> +
>> + set_spec_ctrl_state(tifn);
>> }
>>
>> void speculative_store_bypass_update(unsigned long tif)
>> {
>> + /*
>> + * On this path we're forcing the update, so use ~tif as the
>> + * previous flags.
>> + */
>> preempt_disable();
>> - __speculative_store_bypass_update(tif);
>> + __speculative_store_bypass_update(~tif, tif);
>> preempt_enable();
>> }
>>
>> @@ -457,8 +486,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
>> if ((tifp ^ tifn) & _TIF_NOCPUID)
>> set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
>>
>> - if ((tifp ^ tifn) & (_TIF_SSBD | _TIF_STIBP))
>> - __speculative_store_bypass_update(tifn);
>> + __speculative_store_bypass_update(tifp, tifn);
>> }
>>
>> /*
>>
>
Powered by blists - more mailing lists