[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <7ec59a1a-4caf-24f6-3466-ee1d01594861@amd.com>
Date: Tue, 27 Nov 2018 17:25:08 +0000
From: "Lendacky, Thomas" <Thomas.Lendacky@....com>
To: Thomas Gleixner <tglx@...utronix.de>,
LKML <linux-kernel@...r.kernel.org>
CC: "x86@...nel.org" <x86@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Andy Lutomirski <luto@...nel.org>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Jiri Kosina <jkosina@...e.cz>,
Josh Poimboeuf <jpoimboe@...hat.com>,
Andrea Arcangeli <aarcange@...hat.com>,
David Woodhouse <dwmw@...zon.co.uk>,
Tim Chen <tim.c.chen@...ux.intel.com>,
Andi Kleen <ak@...ux.intel.com>,
Dave Hansen <dave.hansen@...el.com>,
Casey Schaufler <casey.schaufler@...el.com>,
Asit Mallick <asit.k.mallick@...el.com>,
Arjan van de Ven <arjan@...ux.intel.com>,
Jon Masters <jcm@...hat.com>,
Waiman Long <longman9394@...il.com>,
Greg KH <gregkh@...uxfoundation.org>,
Dave Stewart <david.c.stewart@...el.com>,
Kees Cook <keescook@...omium.org>
Subject: Re: [patch V2 18/28] x86/speculation: Prepare for per task indirect
branch speculation control
On 11/25/2018 12:33 PM, Thomas Gleixner wrote:
> To avoid the overhead of STIBP always on, it's necessary to allow per task
> control of STIBP.
>
> Add a new task flag TIF_SPEC_IB and evaluate it during context switch if
> SMT is active and flag evaluation is enabled by the speculation control
> code. Add the conditional evaluation to x86_virt_spec_ctrl() as well so the
> guest/host switch works properly.
>
> This has no effect because TIF_SPEC_IB cannot be set yet and the static key
> which controls evaluation is off. Preparatory patch for adding the control
> code.
>
> [ tglx: Simplify the context switch logic and make the TIF evaluation
> depend on SMP=y and on the static key controlling the conditional
> update. Rename it to TIF_SPEC_IB because it controls both STIBP and
> IBPB ]
>
> Signed-off-by: Tim Chen <tim.c.chen@...ux.intel.com>
> Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
>
> ---
>
> v1 -> v2: Remove pointless include. Use consistent comments.
>
> ---
> arch/x86/include/asm/msr-index.h | 5 +++--
> arch/x86/include/asm/spec-ctrl.h | 12 ++++++++++++
> arch/x86/include/asm/thread_info.h | 5 ++++-
> arch/x86/kernel/cpu/bugs.c | 4 ++++
> arch/x86/kernel/process.c | 23 +++++++++++++++++++++--
> 5 files changed, 44 insertions(+), 5 deletions(-)
>
> --- a/arch/x86/include/asm/msr-index.h
> +++ b/arch/x86/include/asm/msr-index.h
> @@ -41,9 +41,10 @@
>
> #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
> #define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
> -#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
> +#define SPEC_CTRL_STIBP_SHIFT 1 /* Single Thread Indirect Branch Predictor (STIBP) bit */
> +#define SPEC_CTRL_STIBP (1 << SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */
> #define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
> -#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
> +#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
>
> #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
> #define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
> --- a/arch/x86/include/asm/spec-ctrl.h
> +++ b/arch/x86/include/asm/spec-ctrl.h
> @@ -53,12 +53,24 @@ static inline u64 ssbd_tif_to_spec_ctrl(
> return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
> }
>
> +static inline u64 stibp_tif_to_spec_ctrl(u64 tifn)
> +{
> + BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT);
> + return (tifn & _TIF_SPEC_IB) >> (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
> +}
> +
> static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
> {
> BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
> return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
> }
>
> +static inline unsigned long stibp_spec_ctrl_to_tif(u64 spec_ctrl)
> +{
> + BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT);
> + return (spec_ctrl & SPEC_CTRL_STIBP) << (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
> +}
> +
> static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
> {
> return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
> --- a/arch/x86/include/asm/thread_info.h
> +++ b/arch/x86/include/asm/thread_info.h
> @@ -83,6 +83,7 @@ struct thread_info {
> #define TIF_SYSCALL_EMU 6 /* syscall emulation active */
> #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
> #define TIF_SECCOMP 8 /* secure computing */
> +#define TIF_SPEC_IB 9 /* Indirect branch speculation mitigation */
> #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
> #define TIF_UPROBE 12 /* breakpointed or singlestepping */
> #define TIF_PATCH_PENDING 13 /* pending live patching update */
> @@ -110,6 +111,7 @@ struct thread_info {
> #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
> #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
> #define _TIF_SECCOMP (1 << TIF_SECCOMP)
> +#define _TIF_SPEC_IB (1 << TIF_SPEC_IB)
> #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
> #define _TIF_UPROBE (1 << TIF_UPROBE)
> #define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING)
> @@ -146,7 +148,8 @@ struct thread_info {
>
> /* flags to check in __switch_to() */
> #define _TIF_WORK_CTXSW \
> - (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)
> + (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP| \
> + _TIF_SSBD|_TIF_SPEC_IB)
>
> #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
> #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
> --- a/arch/x86/kernel/cpu/bugs.c
> +++ b/arch/x86/kernel/cpu/bugs.c
> @@ -148,6 +148,10 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl,
> static_cpu_has(X86_FEATURE_AMD_SSBD))
> hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
>
> + /* Conditional STIBP enabled? */
> + if (static_branch_unlikely(&switch_to_cond_stibp))
> + hostval |= stibp_tif_to_spec_ctrl(ti->flags);
> +
> if (hostval != guestval) {
> msrval = setguest ? guestval : hostval;
> wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
> --- a/arch/x86/kernel/process.c
> +++ b/arch/x86/kernel/process.c
> @@ -406,6 +406,11 @@ static __always_inline void spec_ctrl_up
> if (static_cpu_has(X86_FEATURE_SSBD))
> msr |= ssbd_tif_to_spec_ctrl(tifn);
I did some quick testing and found my original logic was flawed. Since
spec_ctrl_update_msr() can now be called for STIBP, an additional feature
check is needed before setting the SSBD bit in the SPEC_CTRL MSR.
Both X86_FEATURE_VIRT_SSBD and X86_FEATURE_LS_CFG_SSBD cause
X86_FEATURE_SSBD to be set. Before this patch, spec_ctrl_update_msr() was
only called if X86_FEATURE_SSBD was set and neither of the other SSBD
features was set. But now, STIBP can also cause spec_ctrl_update_msr() to
be called, which would set the SSBD MSR bit when it shouldn't be set
(and could result in a #GP fault on CPUs that don't support that bit).
Thanks,
Tom
>
> + /* Only evaluate if conditional STIBP is enabled */
> + if (IS_ENABLED(CONFIG_SMP) &&
> + static_branch_unlikely(&switch_to_cond_stibp))
> + msr |= stibp_tif_to_spec_ctrl(tifn);
> +
> wrmsrl(MSR_IA32_SPEC_CTRL, msr);
> }
>
> @@ -418,10 +423,16 @@ static __always_inline void spec_ctrl_up
> static __always_inline void __speculation_ctrl_update(unsigned long tifp,
> unsigned long tifn)
> {
> + unsigned long tif_diff = tifp ^ tifn;
> bool updmsr = false;
>
> - /* If TIF_SSBD is different, select the proper mitigation method */
> - if ((tifp ^ tifn) & _TIF_SSBD) {
> + /*
> + * If TIF_SSBD is different, select the proper mitigation
> + * method. Note that if SSBD mitigation is disabled or permanently
> + * enabled this branch can't be taken because nothing can set
> + * TIF_SSBD.
> + */
> + if (tif_diff & _TIF_SSBD) {
> if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
> amd_set_ssb_virt_state(tifn);
> else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
> @@ -430,6 +441,14 @@ static __always_inline void __speculatio
> updmsr = true;
> }
>
> + /*
> + * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled,
> + * otherwise avoid the MSR write.
> + */
> + if (IS_ENABLED(CONFIG_SMP) &&
> + static_branch_unlikely(&switch_to_cond_stibp))
> + updmsr |= !!(tif_diff & _TIF_SPEC_IB);
> +
> if (updmsr)
> spec_ctrl_update_msr(tifn);
> }
>
>
Powered by blists - more mailing lists