Message-ID: <cf7e8b0b-4195-6fea-df75-cb9ac2023a1d@linux.intel.com>
Date:   Wed, 26 Sep 2018 10:24:11 -0700
From:   Tim Chen <tim.c.chen@...ux.intel.com>
To:     Jiri Kosina <jikos@...nel.org>,
        Thomas Gleixner <tglx@...utronix.de>
Cc:     Thomas Lendacky <Thomas.Lendacky@....com>,
        Ingo Molnar <mingo@...hat.com>,
        Peter Zijlstra <peterz@...radead.org>,
        Josh Poimboeuf <jpoimboe@...hat.com>,
        Andrea Arcangeli <aarcange@...hat.com>,
        David Woodhouse <dwmw@...zon.co.uk>,
        Andi Kleen <ak@...ux.intel.com>,
        Dave Hansen <dave.hansen@...el.com>,
        Casey Schaufler <casey.schaufler@...el.com>,
        Asit Mallick <asit.k.mallick@...el.com>,
        Arjan van de Ven <arjan@...ux.intel.com>,
        Jon Masters <jcm@...hat.com>, linux-kernel@...r.kernel.org,
        x86@...nel.org
Subject: Re: [Patch v2 3/4] x86/speculation: Extend per process STIBP to AMD
 cpus.

On 09/25/2018 05:43 PM, Tim Chen wrote:
> From: Thomas Lendacky <Thomas.Lendacky@....com>
> 
> Extend the app-to-app Spectre v2 mitigation using STIBP to AMD CPUs.
> AMD CPUs may update SSBD by a method other than the SPEC_CTRL MSR, so
> those cases need special handling to avoid writing the SPEC_CTRL MSR
> twice when both SSBD and STIBP are updated.

Tom, if this patch looks okay to you, can I add your sign off?

Tim

> 
> Originally-by: Thomas Lendacky <Thomas.Lendacky@....com>
> Signed-off-by: Tim Chen <tim.c.chen@...ux.intel.com>
> ---
>  arch/x86/kernel/process.c | 48 +++++++++++++++++++++++++++++++++++++----------
>  1 file changed, 38 insertions(+), 10 deletions(-)
> 
> diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
> index cb24014..4a3a672 100644
> --- a/arch/x86/kernel/process.c
> +++ b/arch/x86/kernel/process.c
> @@ -399,6 +399,10 @@ static __always_inline void set_spec_ctrl_state(unsigned long tifn)
>  {
>  	u64 msr = x86_spec_ctrl_base;
>  
> +	/*
> +	 * AMD CPUs may have used a different method to update SSBD, so
> +	 * we need to be sure we are using the SPEC_CTRL MSR for SSBD.
> +	 */
>  	if (static_cpu_has(X86_FEATURE_SSBD))
>  		msr |= ssbd_tif_to_spec_ctrl(tifn);
>  
> @@ -408,20 +412,45 @@ static __always_inline void set_spec_ctrl_state(unsigned long tifn)
>  	wrmsrl(MSR_IA32_SPEC_CTRL, msr);
>  }
>  
> -static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
> +static __always_inline void __speculative_store_bypass_update(unsigned long tifp,
> +							      unsigned long tifn)
>  {
> -	if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
> -		amd_set_ssb_virt_state(tifn);
> -	else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
> -		amd_set_core_ssb_state(tifn);
> -	else
> -		set_spec_ctrl_state(tifn);
> +	bool stibp = !!((tifp ^ tifn) & _TIF_STIBP);
> +	bool ssbd = !!((tifp ^ tifn) & _TIF_SSBD);
> +
> +	if (!ssbd && !stibp)
> +		return;
> +
> +	if (ssbd) {
> +		/*
> +		 * For AMD, try these methods first.  The ssbd variable will
> +		 * reflect if the SPEC_CTRL MSR method is needed.
> +		 */
> +		ssbd = false;
> +
> +		if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
> +			amd_set_ssb_virt_state(tifn);
> +		else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
> +			amd_set_core_ssb_state(tifn);
> +		else
> +			ssbd = true;
> +	}
> +
> +	/* Avoid a possible extra MSR write, recheck the flags */
> +	if (!ssbd && !stibp)
> +		return;
> +
> +	set_spec_ctrl_state(tifn);
>  }
>  
>  void speculative_store_bypass_update(unsigned long tif)
>  {
> +	/*
> +	 * On this path we're forcing the update, so use ~tif as the
> +	 * previous flags.
> +	 */
>  	preempt_disable();
> -	__speculative_store_bypass_update(tif);
> +	__speculative_store_bypass_update(~tif, tif);
>  	preempt_enable();
>  }
>  
> @@ -457,8 +486,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
>  	if ((tifp ^ tifn) & _TIF_NOCPUID)
>  		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
>  
> -	if ((tifp ^ tifn) & (_TIF_SSBD | _TIF_STIBP))
> -		__speculative_store_bypass_update(tifn);
> +	__speculative_store_bypass_update(tifp, tifn);
>  }
>  
>  /*
> 
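
To illustrate the flag-transition check the patch relies on, here is a
minimal userspace sketch (not kernel code): XOR-ing the previous and
next thread-flag words shows which bits changed across a switch, and
the MSR write is skipped when neither SSBD nor STIBP changed. The
TIF_* bit positions and the update_spec_ctrl() helper below are
illustrative assumptions, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

/* hypothetical bit positions, for illustration only */
#define TIF_SSBD   (1UL << 5)
#define TIF_STIBP  (1UL << 9)

static void update_spec_ctrl(unsigned long prev, unsigned long next)
{
	bool stibp = !!((prev ^ next) & TIF_STIBP);
	bool ssbd  = !!((prev ^ next) & TIF_SSBD);

	/* neither flag changed across the switch: nothing to write */
	if (!ssbd && !stibp)
		return;

	printf("would write SPEC_CTRL for flags %#lx (ssbd=%d stibp=%d changed)\n",
	       next, ssbd, stibp);
}

int main(void)
{
	update_spec_ctrl(0, TIF_STIBP);        /* STIBP toggled on  -> write */
	update_spec_ctrl(TIF_SSBD, TIF_SSBD);  /* no change         -> skip  */
	update_spec_ctrl(~0UL, TIF_SSBD);      /* forced update (~tif path) -> write */
	return 0;
}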
