Message-Id: <1515624682-3556-1-git-send-email-dwmw@amazon.co.uk>
Date:   Wed, 10 Jan 2018 22:51:22 +0000
From:   David Woodhouse <dwmw@...zon.co.uk>
To:     Andi Kleen <ak@...ux.intel.com>
Cc:     Paul Turner <pjt@...gle.com>, LKML <linux-kernel@...r.kernel.org>,
        Linus Torvalds <torvalds@...ux-foundation.org>,
        Greg Kroah-Hartman <gregkh@...ux-foundation.org>,
        Tim Chen <tim.c.chen@...ux.intel.com>,
        Dave Hansen <dave.hansen@...el.com>, tglx@...utronix.de,
        Kees Cook <keescook@...gle.com>,
        Rik van Riel <riel@...hat.com>,
        Peter Zijlstra <peterz@...radead.org>,
        Andy Lutomirski <luto@...capital.net>,
        Jiri Kosina <jikos@...nel.org>, gnomes@...rguk.ukuu.org.uk,
        x86@...nel.org, bp@...en8.de, rga@...zon.de
Subject: [PATCH] x86/retpoline: Fill return stack buffer on vmexit

In accordance with the Intel and AMD documentation, we need to overwrite
all entries in the RSB on exiting a guest, to prevent malicious branch
target predictions from affecting the host kernel. This is needed both
for retpoline and for IBRS.

Signed-off-by: David Woodhouse <dwmw@...zon.co.uk>
---
Untested in this form, although it's a variant on what we've had already.
I have an army of machines willing to do my bidding, but nested virt
is non-trivial, and I figure I might as well post it, since someone else
can probably test it in less time than it takes me to work out how.

This implements the most pressing of the RSB stuffing cases documented
by dhansen (based on our discussions) in https://goo.gl/pXbvBE
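
Purely as an illustration (not part of the patch), here is roughly what
the stuffing sequence boils down to, as a standalone userspace sketch.
The rsb_fill_sketch() name and the numeric labels are made up, the
.align 16 directives are omitted for brevity, and it's x86-64 only;
build with -mno-red-zone, since the calls scribble below %rsp:

/* rsb_fill_demo.c -- gcc -O2 -mno-red-zone rsb_fill_demo.c */
#include <stdio.h>

static void rsb_fill_sketch(void)
{
	unsigned long loops;

	asm volatile(
		"	mov	$15, %0\n"	  /* NUM_BRANCHES_TO_FILL/2 */
		"1:	call	2f\n"		  /* push RSB entry #1... */
		"11:	pause\n"		  /* ...pointing at this */
		"	jmp	11b\n"		  /* speculation-capture loop */
		"2:	call	3f\n"		  /* push RSB entry #2 */
		"22:	pause\n"
		"	jmp	22b\n"
		"3:	dec	%0\n"
		"	jnz	1b\n"
		"	add	$(30*8), %%rsp\n" /* drop all 30 return addrs */
		: "=r" (loops) : : "memory");
}

int main(void)
{
	rsb_fill_sketch();
	puts("RSB overwritten with 30 capture-loop entries");
	return 0;
}

The add at the end is the same $(BITS_PER_LONG/8)*NUM_BRANCHES_TO_FILL
fixup as in the patch: 15 loop iterations times two calls each leaves 30
stale return addresses (8 bytes apiece on x86-64) on the stack, which
the single add pops in one go.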

 arch/x86/include/asm/nospec-branch.h | 67 ++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/svm.c                   |  4 +++
 arch/x86/kvm/vmx.c                   |  4 +++
 3 files changed, 75 insertions(+)

diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 7d70ea9..e6a7ea0 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -7,6 +7,50 @@
 #include <asm/alternative-asm.h>
 #include <asm/cpufeatures.h>
 
+/*
+ * Use 32-N: 32 is the max return buffer size, but at least two
+ * controlled calls should already have been made by this point: one
+ * into the kernel from entry*.S and another into the function
+ * containing this macro. So N=2, thus 30.
+ */
+#define NUM_BRANCHES_TO_FILL	30
+
+/*
+ * Fill the CPU return stack buffer.
+ *
+ * Each entry in the RSB, if used for a speculative 'ret', contains an
+ * infinite 'pause; jmp' loop to capture speculative execution.
+ *
+ * This is required in various cases for retpoline and IBRS-based
+ * mitigations for the Spectre variant 2 vulnerability. Sometimes to
+ * eliminate potentially bogus entries from the RSB, and sometimes
+ * purely to ensure that it doesn't get empty, which on some CPUs would
+ * allow predictions from other (unwanted!) sources to be used.
+ *
+ * We define a CPP macro such that it can be used from both .S files and
+ * inline assembly. It's possible to do a .macro and then include that
+ * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
+ */
+#define __FILL_RETURN_BUFFER(reg, sp, uniq)	\
+	mov	$(NUM_BRANCHES_TO_FILL/2), reg;	\
+	.align	16;				\
+.Ldo_call1_ ## uniq:				\
+	call	.Ldo_call2_ ## uniq;		\
+.Ltrap1_ ## uniq:				\
+	pause;					\
+	jmp	.Ltrap1_ ## uniq;		\
+	.align	16;				\
+.Ldo_call2_ ## uniq:				\
+	call	.Ldo_loop_ ## uniq;		\
+.Ltrap2_ ## uniq:				\
+	pause;					\
+	jmp	.Ltrap2_ ## uniq;		\
+	.align	16;				\
+.Ldo_loop_ ## uniq:				\
+	dec	reg;				\
+	jnz	.Ldo_call1_ ## uniq;		\
+	add	$(BITS_PER_LONG/8)*NUM_BRANCHES_TO_FILL, sp;
+
 #ifdef __ASSEMBLY__
 
 /*
@@ -61,6 +105,14 @@
 #endif
 .endm
 
+/*
+ * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
+ * monstrosity above manually.
+ */
+.macro FILL_RETURN_BUFFER reg:req
+	__FILL_RETURN_BUFFER(\reg,%_ASM_SP,\@)
+.endm
+
 #else /* __ASSEMBLY__ */
 
 #if defined(CONFIG_X86_64) && defined(RETPOLINE)
@@ -115,5 +167,20 @@ enum spectre_v2_mitigation {
 	SPECTRE_V2_IBRS,
 };
 
+/*
+ * On VMEXIT we must ensure that no RSB predictions learned in the guest
+ * can be followed in the host, by overwriting the RSB completely. Both
+ * retpoline and IBRS mitigations for Spectre v2 need this; only on future
+ * CPUs with IBRS_ATT *might* it be avoided.
+ */
+static inline void vmexit_fill_RSB(void)
+{
+	unsigned long dummy;
+
+	asm volatile (ALTERNATIVE("",
+				  __stringify(__FILL_RETURN_BUFFER(%0, %1, _%=)),
+				  X86_FEATURE_RETPOLINE)
+		      : "=r" (dummy), ASM_CALL_CONSTRAINT : : "memory" );
+}
 #endif /* __ASSEMBLY__ */
 #endif /* __NOSPEC_BRANCH_H__ */
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 0e68f0b..2744b973 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -45,6 +45,7 @@
 #include <asm/debugreg.h>
 #include <asm/kvm_para.h>
 #include <asm/irq_remapping.h>
+#include <asm/nospec-branch.h>
 
 #include <asm/virtext.h>
 #include "trace.h"
@@ -4985,6 +4986,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
 		);
 
+	/* Eliminate branch target predictions from guest mode */
+	vmexit_fill_RSB();
+
 #ifdef CONFIG_X86_64
 	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
 #else
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 62ee436..d1e25db 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -50,6 +50,7 @@
 #include <asm/apic.h>
 #include <asm/irq_remapping.h>
 #include <asm/mmu_context.h>
+#include <asm/nospec-branch.h>
 
 #include "trace.h"
 #include "pmu.h"
@@ -9403,6 +9404,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
 	      );
 
+	/* Eliminate branch target predictions from guest mode */
+	vmexit_fill_RSB();
+
 	/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
 	if (debugctlmsr)
 		update_debugctlmsr(debugctlmsr);
-- 
2.7.4
