Message-Id: <1515117669-24787-4-git-send-email-dwmw@amazon.co.uk>
Date: Fri, 5 Jan 2018 02:00:59 +0000
From: David Woodhouse <dwmw@...zon.co.uk>
To: Andi Kleen <ak@...ux.intel.com>
Cc: Paul Turner <pjt@...gle.com>, LKML <linux-kernel@...r.kernel.org>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Greg Kroah-Hartman <gregkh@...ux-foundation.org>,
Tim Chen <tim.c.chen@...ux.intel.com>,
Dave Hansen <dave.hansen@...el.com>, tglx@...utronix.de,
Kees Cook <keescook@...gle.com>,
Rik van Riel <riel@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Andy Lutomirski <luto@...capital.net>,
Jiri Kosina <jikos@...nel.org>, gnomes@...rguk.ukuu.org.uk
Subject: [PATCH v4 03/13] x86/retpoline/entry: Convert entry assembler indirect jumps
Convert indirect jumps in the core 32/64-bit entry assembler code to use
non-speculative sequences when CONFIG_RETPOLINE is enabled.
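
For reference, the non-speculative sequence in question is the retpoline
construct provided by <asm/nospec-branch.h> / retpoline.S elsewhere in this
series. Roughly, for an indirect jump through %rax, it looks like the
following (illustrative sketch only; the local label names are made up here
and the real thunks are generated per register):

	call	.Lset_target		/* pushes .Lcapture as the return address */
.Lcapture:
	lfence				/* stop speculation */
	jmp	.Lcapture		/* speculative execution spins here harmlessly */
.Lset_target:
	mov	%rax, (%rsp)		/* overwrite return address with the real target */
	ret				/* 'return' to *%rax without an indirect branch */

The return-stack predictor sends speculation to .Lcapture, where it is
trapped by the lfence/jmp loop, while the architectural path ends up at the
intended target.
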
KPTI complicates this a little; the one in entry_SYSCALL_64_trampoline
can't just jump to the thunk because the thunk isn't mapped. So it gets
its own copy of the thunk, inline.
Don't use NOSPEC_CALL in entry_SYSCALL_64_fastpath because the return
address after the 'call' instruction must be *precisely* at the
.Lentry_SYSCALL_64_after_fastpath_call label for stub_ptregs_64 to work,
and the use of alternatives will mess that up unless we play horrid
games to prepend with NOPs and make the variants the same length. It's
not worth it; in the case where we ALTERNATIVE out the retpoline, the
first instruction at __x86.indirect_thunk.rax is going to be a bare
jmp *%rax anyway.
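
To make the length problem concrete, here is a hypothetical sketch (not part
of this patch; byte counts are illustrative and the exact expansion of
NOSPEC_CALL lives in <asm/nospec-branch.h>):

	/* The two ALTERNATIVE variants encode to different lengths, roughly:
	 *
	 *	call	__x86.indirect_thunk.rax	# 5 bytes (E8 + rel32)
	 *	call	*%rax				# 2 bytes (FF D0)
	 *
	 * so the shorter one gets NOP-padded *after* the call, and the return
	 * address pushed by the call would no longer sit exactly on the label
	 * that stub_ptregs_64 compares against:
	 */
	NOSPEC_CALL rax				/* call + trailing NOPs */
.Lentry_SYSCALL_64_after_fastpath_call:

Hence the open-coded movq + direct call to the thunk in the hunk below, which
keeps the return address immediately before the label in both cases.
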
Signed-off-by: David Woodhouse <dwmw@...zon.co.uk>
---
arch/x86/entry/entry_32.S | 5 +++--
arch/x86/entry/entry_64.S | 22 +++++++++++++++++++---
2 files changed, 22 insertions(+), 5 deletions(-)
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index ace8f32..abd1e5d 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -44,6 +44,7 @@
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/frame.h>
+#include <asm/nospec-branch.h>
.section .entry.text, "ax"
@@ -290,7 +291,7 @@ ENTRY(ret_from_fork)
/* kernel thread */
1: movl %edi, %eax
- call *%ebx
+ NOSPEC_CALL ebx
/*
* A kernel thread is allowed to return here after successfully
* calling do_execve(). Exit to userspace to complete the execve()
@@ -919,7 +920,7 @@ common_exception:
movl %ecx, %es
TRACE_IRQS_OFF
movl %esp, %eax # pt_regs pointer
- call *%edi
+ NOSPEC_CALL edi
jmp ret_from_exception
END(common_exception)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index f048e38..dbca433 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -37,6 +37,7 @@
#include <asm/pgtable_types.h>
#include <asm/export.h>
#include <asm/frame.h>
+#include <asm/nospec-branch.h>
#include <linux/err.h>
#include "calling.h"
@@ -191,7 +192,17 @@ ENTRY(entry_SYSCALL_64_trampoline)
*/
pushq %rdi
movq $entry_SYSCALL_64_stage2, %rdi
- jmp *%rdi
+ /*
+ * Open-code the retpoline from retpoline.S, because we can't
+ * just jump to it directly.
+ */
+ ALTERNATIVE "call 2f", "jmp *%rdi", X86_BUG_NO_RETPOLINE
+1:
+ lfence
+ jmp 1b
+2:
+ mov %rdi, (%rsp)
+ ret
END(entry_SYSCALL_64_trampoline)
.popsection
@@ -270,7 +281,12 @@ entry_SYSCALL_64_fastpath:
* It might end up jumping to the slow path. If it jumps, RAX
* and all argument registers are clobbered.
*/
+#ifdef CONFIG_RETPOLINE
+ movq sys_call_table(, %rax, 8), %rax
+ call __x86.indirect_thunk.rax
+#else
call *sys_call_table(, %rax, 8)
+#endif
.Lentry_SYSCALL_64_after_fastpath_call:
movq %rax, RAX(%rsp)
@@ -442,7 +458,7 @@ ENTRY(stub_ptregs_64)
jmp entry_SYSCALL64_slow_path
1:
- jmp *%rax /* Called from C */
+ NOSPEC_JMP rax /* Called from C */
END(stub_ptregs_64)
.macro ptregs_stub func
@@ -521,7 +537,7 @@ ENTRY(ret_from_fork)
1:
/* kernel thread */
movq %r12, %rdi
- call *%rbx
+ NOSPEC_CALL rbx
/*
* A kernel thread is allowed to return here after successfully
* calling do_execve(). Exit to userspace to complete the execve()
--
2.7.4