Message-Id: <20180104020019.1173-4-andi@firstfloor.org>
Date: Wed, 3 Jan 2018 18:00:10 -0800
From: Andi Kleen <andi@...stfloor.org>
To: tglx@...utronix.de
Cc: torvalds@...ux-foundation.org, gregkh@...ux-foundation.org,
linux-kernel@...r.kernel.org, tim.c.chen@...ux.intel.com,
Andi Kleen <ak@...ux.intel.com>
Subject: [PATCH v2 03/12] x86/retpoline/entry: Convert entry assembler indirect jumps
From: Andi Kleen <ak@...ux.intel.com>
Convert all indirect jumps in the core 32/64-bit entry assembler code to
use non-speculative sequences.
Based on code from David Woodhouse and Tim Chen.
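
The NOSPEC_CALL/NOSPEC_JMP macros come from the <asm/jump-asm.h> header
added earlier in this series. As a rough sketch of the idea (illustrative
only; the real macro bodies in that header may differ in detail), a
retpoline-style non-speculative jump looks like this, with _ASM_SP from
<asm/asm.h> naming %esp/%rsp as appropriate:

  /*
   * Illustrative retpoline-style sequence, NOT the literal
   * <asm/jump-asm.h> macro body.  The indirect jump becomes a
   * call/ret pair: the pushed return address is overwritten with
   * the real target, so the CPU's return predictor can only
   * speculate into the harmless pause/lfence trap loop.
   */
  .macro NOSPEC_JMP reg:req
	call	.Ldo_jmp_\@
  .Lspec_trap_\@:
	pause				/* speculation lands here ... */
	lfence				/* ... and spins without effect */
	jmp	.Lspec_trap_\@
  .Ldo_jmp_\@:
	mov	\reg, (%_ASM_SP)	/* replace return address with \reg */
	ret				/* architecturally jumps to *\reg */
  .endm

NOSPEC_CALL wraps the same sequence in one more call, so that the called
function's own ret resumes execution after the macro.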
Signed-off-by: Andi Kleen <ak@...ux.intel.com>
---
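Note: as used in this patch, NOSPEC_CALL/NOSPEC_JMP always take a register
operand, so the 64-bit fastpath cannot keep its memory-indirect call
through sys_call_table. The target is loaded into RAX first and the
register form of NOSPEC_CALL is used; per the existing comment there,
RAX is clobbered on that path anyway.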
 arch/x86/entry/entry_32.S |  5 +++--
 arch/x86/entry/entry_64.S | 12 +++++++-----
 2 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index ace8f321a5a1..a4b88260d6f7 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -44,6 +44,7 @@
 #include <asm/asm.h>
 #include <asm/smap.h>
 #include <asm/frame.h>
+#include <asm/jump-asm.h>
 
 	.section .entry.text, "ax"
 
@@ -290,7 +291,7 @@ ENTRY(ret_from_fork)
 
 	/* kernel thread */
 1:	movl	%edi, %eax
-	call	*%ebx
+	NOSPEC_CALL %ebx
 	/*
 	 * A kernel thread is allowed to return here after successfully
 	 * calling do_execve().  Exit to userspace to complete the execve()
@@ -919,7 +920,7 @@ common_exception:
 	movl	%ecx, %es
 	TRACE_IRQS_OFF
 	movl	%esp, %eax			# pt_regs pointer
-	call	*%edi
+	NOSPEC_CALL %edi
 	jmp	ret_from_exception
 END(common_exception)
 
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index f048e384ff54..486990fb3e4d 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -37,6 +37,7 @@
 #include <asm/pgtable_types.h>
 #include <asm/export.h>
 #include <asm/frame.h>
+#include <asm/jump-asm.h>
 #include <linux/err.h>
 
 #include "calling.h"
@@ -191,7 +192,7 @@ ENTRY(entry_SYSCALL_64_trampoline)
 	 */
 	pushq	%rdi
 	movq	$entry_SYSCALL_64_stage2, %rdi
-	jmp	*%rdi
+	NOSPEC_JMP %rdi
 END(entry_SYSCALL_64_trampoline)
 
 	.popsection
@@ -269,8 +270,9 @@ entry_SYSCALL_64_fastpath:
 	 * This call instruction is handled specially in stub_ptregs_64.
 	 * It might end up jumping to the slow path.  If it jumps, RAX
 	 * and all argument registers are clobbered.
-	 */
-	call	*sys_call_table(, %rax, 8)
+	 */
+	movq	sys_call_table(, %rax, 8), %rax
+	NOSPEC_CALL %rax
 .Lentry_SYSCALL_64_after_fastpath_call:
 
 	movq	%rax, RAX(%rsp)
@@ -442,7 +444,7 @@ ENTRY(stub_ptregs_64)
 	jmp	entry_SYSCALL64_slow_path
 
 1:
-	jmp	*%rax				/* Called from C */
+	NOSPEC_JMP %rax				/* Called from C */
 END(stub_ptregs_64)
 
 .macro ptregs_stub func
@@ -521,7 +523,7 @@ ENTRY(ret_from_fork)
 1:
 	/* kernel thread */
 	movq	%r12, %rdi
-	call	*%rbx
+	NOSPEC_CALL %rbx
 	/*
 	 * A kernel thread is allowed to return here after successfully
 	 * calling do_execve().  Exit to userspace to complete the execve()
--
2.14.3