Message-Id: <1e4031d6642d6a835d33651fd5d8f614d95daee5.1539798901.git.tim.c.chen@linux.intel.com>
Date: Wed, 17 Oct 2018 10:59:38 -0700
From: Tim Chen <tim.c.chen@...ux.intel.com>
To: Jiri Kosina <jikos@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>
Cc: Tim Chen <tim.c.chen@...ux.intel.com>,
Tom Lendacky <thomas.lendacky@....com>,
Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Josh Poimboeuf <jpoimboe@...hat.com>,
Andrea Arcangeli <aarcange@...hat.com>,
David Woodhouse <dwmw@...zon.co.uk>,
Andi Kleen <ak@...ux.intel.com>,
Dave Hansen <dave.hansen@...el.com>,
Casey Schaufler <casey.schaufler@...el.com>,
Asit Mallick <asit.k.mallick@...el.com>,
Arjan van de Ven <arjan@...ux.intel.com>,
Jon Masters <jcm@...hat.com>, linux-kernel@...r.kernel.org,
x86@...nel.org
Subject: [Patch v3 10/13] x86/speculation: Add per thread STIBP flag
Add a per thread STIBP flag. When context switching to a process thread that
has the STIBP flag set, the STIBP bit in the SPEC_CTRL MSR is turned on to
guard against application-to-application Spectre v2 attacks. When switching
to a non-security-sensitive thread that does not have the STIBP flag set,
the STIBP bit in the SPEC_CTRL MSR is turned off.
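
For illustration only, here is a minimal standalone sketch (user-space C,
not kernel code) of how the per thread flag maps onto the MSR bit with a
single shift, mirroring the stibp_tif_to_spec_ctrl() helper added below.
The bit positions are the ones used in this patch; everything else is
simplified:

	/* sketch only: TIF_STIBP flag -> SPEC_CTRL_STIBP MSR bit */
	#include <stdio.h>
	#include <stdint.h>

	#define SPEC_CTRL_STIBP_SHIFT	1		/* STIBP bit in SPEC_CTRL */
	#define SPEC_CTRL_STIBP		(1UL << SPEC_CTRL_STIBP_SHIFT)
	#define TIF_STIBP		9		/* per thread flag bit */
	#define _TIF_STIBP		(1UL << TIF_STIBP)

	static uint64_t stibp_tif_to_spec_ctrl(uint64_t tifn)
	{
		/* TIF_STIBP (9) >= SPEC_CTRL_STIBP_SHIFT (1), so shift right */
		return (tifn & _TIF_STIBP) >> (TIF_STIBP - SPEC_CTRL_STIBP_SHIFT);
	}

	int main(void)
	{
		uint64_t flags = _TIF_STIBP;	/* thread has the STIBP flag set */

		/* prints 0x2, i.e. SPEC_CTRL_STIBP */
		printf("SPEC_CTRL contribution: 0x%llx\n",
		       (unsigned long long)stibp_tif_to_spec_ctrl(flags));
		return 0;
	}
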
Signed-off-by: Tim Chen <tim.c.chen@...ux.intel.com>
---
arch/x86/include/asm/msr-index.h | 6 +++++-
arch/x86/include/asm/spec-ctrl.h | 12 ++++++++++++
arch/x86/include/asm/thread_info.h | 5 ++++-
arch/x86/kernel/process.c | 10 +++++++++-
4 files changed, 30 insertions(+), 3 deletions(-)
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 4731f0c..bd19452 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -41,7 +41,11 @@
#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
-#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
+#define SPEC_CTRL_STIBP_SHIFT 1 /*
+ * Single Thread Indirect Branch
+ * Predictor (STIBP) bit
+ */
+#define SPEC_CTRL_STIBP (1 << SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */
#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
index 8e2f841..b593779 100644
--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -53,12 +53,24 @@ static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}
+static inline u64 stibp_tif_to_spec_ctrl(u64 tifn)
+{
+ BUILD_BUG_ON(TIF_STIBP < SPEC_CTRL_STIBP_SHIFT);
+ return (tifn & _TIF_STIBP) >> (TIF_STIBP - SPEC_CTRL_STIBP_SHIFT);
+}
+
static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
{
BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}
+static inline unsigned long stibp_spec_ctrl_to_tif(u64 spec_ctrl)
+{
+ BUILD_BUG_ON(TIF_STIBP < SPEC_CTRL_STIBP_SHIFT);
+ return (spec_ctrl & SPEC_CTRL_STIBP) << (TIF_STIBP - SPEC_CTRL_STIBP_SHIFT);
+}
+
static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
{
return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 2ff2a30..a3ec545 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -83,6 +83,7 @@ struct thread_info {
#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SECCOMP 8 /* secure computing */
+#define TIF_STIBP 9 /* Single threaded indirect branch predict */
#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
#define TIF_UPROBE 12 /* breakpointed or singlestepping */
#define TIF_PATCH_PENDING 13 /* pending live patching update */
@@ -110,6 +111,7 @@ struct thread_info {
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_STIBP (1 << TIF_STIBP)
#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING)
@@ -146,7 +148,8 @@ struct thread_info {
/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW \
- (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)
+ (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP| \
+ _TIF_SSBD|_TIF_STIBP)
#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 789f1bada..a65b456 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -406,13 +406,21 @@ static __always_inline void spec_ctrl_update_msr(unsigned long tifn)
if (static_cpu_has(X86_FEATURE_SSBD))
msr |= ssbd_tif_to_spec_ctrl(tifn);
+ /*
+ * Need STIBP defense against Spectre v2 attack
+ * if SMT is in use and we don't have enhanced IBRS.
+ */
+ if (static_branch_likely(&cpu_smt_enabled) &&
+ !static_branch_unlikely(&spectre_v2_enhanced_ibrs))
+ msr |= stibp_tif_to_spec_ctrl(tifn);
+
wrmsrl(MSR_IA32_SPEC_CTRL, msr);
}
static __always_inline void __speculation_ctrl_update(unsigned long tifp,
unsigned long tifn)
{
- bool updmsr = false;
+ bool updmsr = !!((tifp ^ tifn) & _TIF_STIBP);
/* Check for AMD cpu to see if it uses SPEC_CTRL MSR for SSBD */
if ((tifp ^ tifn) & _TIF_SSBD) {
--
2.9.4
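
As a side note on the updmsr change in process.c above: the idea is that the
MSR only needs to be rewritten when the STIBP (or SSBD) flag actually differs
between the previous and next task. The following is a minimal user-space
sketch of that decision, not the kernel implementation; TIF_SSBD's value is
not shown in this patch, so the 5 used here is an assumption purely for
illustration:

	/* sketch only: does switching from tifp to tifn require an MSR write? */
	#include <stdio.h>
	#include <stdint.h>
	#include <stdbool.h>

	#define TIF_SSBD	5		/* assumed value, for illustration */
	#define TIF_STIBP	9		/* value from this patch */
	#define _TIF_SSBD	(1UL << TIF_SSBD)
	#define _TIF_STIBP	(1UL << TIF_STIBP)

	static bool spec_ctrl_needs_update(uint64_t tifp, uint64_t tifn)
	{
		/* a write is needed only if one of the flags flips across the switch */
		return ((tifp ^ tifn) & (_TIF_STIBP | _TIF_SSBD)) != 0;
	}

	int main(void)
	{
		/* STIBP flag flips across the switch -> MSR write needed */
		printf("%d\n", spec_ctrl_needs_update(0, _TIF_STIBP));		/* 1 */
		/* both tasks have STIBP set -> no write needed */
		printf("%d\n", spec_ctrl_needs_update(_TIF_STIBP, _TIF_STIBP));	/* 0 */
		return 0;
	}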