[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20180110035407.GA16912@outlook.office365.com>
Date: Tue, 9 Jan 2018 19:54:08 -0800
From: Andrei Vagin <avagin@...tuozzo.com>
To: "Woodhouse, David" <dwmw@...zon.co.uk>
Cc: Andi Kleen <ak@...ux.intel.com>, Paul Turner <pjt@...gle.com>,
LKML <linux-kernel@...r.kernel.org>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Greg Kroah-Hartman <gregkh@...ux-foundation.org>,
Tim Chen <tim.c.chen@...ux.intel.com>,
Dave Hansen <dave.hansen@...el.com>, tglx@...utronix.de,
Kees Cook <keescook@...gle.com>,
Rik van Riel <riel@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Andy Lutomirski <luto@...capital.net>,
Jiri Kosina <jikos@...nel.org>, gnomes@...rguk.ukuu.org.uk,
x86@...nel.org
Subject: Re: [v7, 05/11] x86/retpoline/entry: Convert entry assembler
indirect jumps
Hi,
In my test environment, the kernel with this patch crashes.
https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git/commit/?h=x86/pti&id=f3433c1010c6af61c9897f0f0447f81b991feac1
My config and a kernel log are attached.
[ 1.985901] Freeing unused kernel memory: 524K
[ 1.987505] rodata_test: all tests were successful
[ 2.019787] kernel tried to execute NX-protected page - exploit attempt? (uid: 0)
[ 2.023023] BUG: unable to handle kernel paging request at fffffe0000007000
[ 2.027524] IP: 0xfffffe0000007000
[ 2.029371] PGD 13ffda067 P4D 13ffda067 PUD 13ffcf067 PMD 13ffce067 PTE 800000013fc09063
[ 2.032847] Oops: 0011 [#1] SMP PTI
[ 2.034598] Modules linked in:
[ 2.036420] CPU: 0 PID: 1 Comm: init Not tainted 4.14.0-00209-gf3433c1010c6 #7
[ 2.039005] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1.fc26 04/01/2014
[ 2.041685] task: ffff96dc3ab88000 task.stack: ffffb7aa4062c000
[ 2.044508] RIP: 0010:0xfffffe0000007000
[ 2.046511] RSP: 0018:ffffb7aa4062ffd0 EFLAGS: 00010082
[ 2.049264] RAX: 000000000000000c RBX: 0000000000000001 RCX: 00007fe8a02f8889
[ 2.054181] RDX: 000000000000004d RSI: 0000000000000041 RDI: ffffffffb9a00010
[ 2.056528] RBP: 000055b931deb040 R08: 0000000000000008 R09: 00007fe8a02fdfc4
[ 2.058622] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000009
[ 2.060367] R13: 00007fe8a02df3a0 R14: 0000000000000001 R15: 0000000000001000
[ 2.062107] FS: 0000000000000000(0000) GS:ffff96dc3fc00000(0000) knlGS:0000000000000000
[ 2.063949] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 2.065207] CR2: fffffe0000007000 CR3: 00000001393d2002 CR4: 00000000003606f0
[ 2.066768] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[ 2.068190] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[ 2.069406] Call Trace:
[ 2.069864] Code: 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 <00> 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
[ 2.073535] RIP: 0xfffffe0000007000 RSP: ffffb7aa4062ffd0
[ 2.074540] CR2: fffffe0000007000
[ 2.075165] ---[ end trace 394308f539cb80d2 ]---
Thanks,
Andrei
On Tue, Jan 09, 2018 at 02:43:11PM +0000, Woodhouse, David wrote:
> Convert indirect jumps in core 32/64bit entry assembler code to use
> non-speculative sequences when CONFIG_RETPOLINE is enabled.
>
> Don't use CALL_NOSPEC in entry_SYSCALL_64_fastpath because the return
> address after the 'call' instruction must be *precisely* at the
> .Lentry_SYSCALL_64_after_fastpath label for stub_ptregs_64 to work,
> and the use of alternatives will mess that up unless we play horrid
> games to prepend with NOPs and make the variants the same length. It's
> not worth it; in the case where we ALTERNATIVE out the retpoline, the
> first instruction at __x86.indirect_thunk.rax is going to be a bare
> jmp *%rax anyway.
>
> Signed-off-by: David Woodhouse <dwmw@...zon.co.uk>
> Acked-By: Arjan van de Ven <arjan@...ux.intel.com>
> Acked-by: Ingo Molnar <mingo@...nel.org>
> Cc: gnomes@...rguk.ukuu.org.uk
> Cc: Rik van Riel <riel@...hat.com>
> Cc: Andi Kleen <ak@...ux.intel.com>
> Cc: Peter Zijlstra <peterz@...radead.org>
> Cc: Linus Torvalds <torvalds@...ux-foundation.org>
> Cc: Jiri Kosina <jikos@...nel.org>
> Cc: Andy Lutomirski <luto@...capital.net>
> Cc: Dave Hansen <dave.hansen@...el.com>
> Cc: Kees Cook <keescook@...gle.com>
> Cc: Tim Chen <tim.c.chen@...ux.intel.com>
> Cc: Greg Kroah-Hartman <gregkh@...ux-foundation.org>
> Cc: Paul Turner <pjt@...gle.com>
> ---
> arch/x86/entry/entry_32.S | 5 +++--
> arch/x86/entry/entry_64.S | 12 +++++++++---
> 2 files changed, 12 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
> index ace8f32..a1f28a5 100644
> --- a/arch/x86/entry/entry_32.S
> +++ b/arch/x86/entry/entry_32.S
> @@ -44,6 +44,7 @@
> #include <asm/asm.h>
> #include <asm/smap.h>
> #include <asm/frame.h>
> +#include <asm/nospec-branch.h>
>
> .section .entry.text, "ax"
>
> @@ -290,7 +291,7 @@ ENTRY(ret_from_fork)
>
> /* kernel thread */
> 1: movl %edi, %eax
> - call *%ebx
> + CALL_NOSPEC %ebx
> /*
> * A kernel thread is allowed to return here after successfully
> * calling do_execve(). Exit to userspace to complete the execve()
> @@ -919,7 +920,7 @@ common_exception:
> movl %ecx, %es
> TRACE_IRQS_OFF
> movl %esp, %eax # pt_regs pointer
> - call *%edi
> + CALL_NOSPEC %edi
> jmp ret_from_exception
> END(common_exception)
>
> diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
> index ed31d00..59874bc 100644
> --- a/arch/x86/entry/entry_64.S
> +++ b/arch/x86/entry/entry_64.S
> @@ -37,6 +37,7 @@
> #include <asm/pgtable_types.h>
> #include <asm/export.h>
> #include <asm/frame.h>
> +#include <asm/nospec-branch.h>
> #include <linux/err.h>
>
> #include "calling.h"
> @@ -187,7 +188,7 @@ ENTRY(entry_SYSCALL_64_trampoline)
> */
> pushq %rdi
> movq $entry_SYSCALL_64_stage2, %rdi
> - jmp *%rdi
> + JMP_NOSPEC %rdi
> END(entry_SYSCALL_64_trampoline)
>
> .popsection
> @@ -266,7 +267,12 @@ entry_SYSCALL_64_fastpath:
> * It might end up jumping to the slow path. If it jumps, RAX
> * and all argument registers are clobbered.
> */
> +#ifdef CONFIG_RETPOLINE
> + movq sys_call_table(, %rax, 8), %rax
> + call __x86_indirect_thunk_rax
> +#else
> call *sys_call_table(, %rax, 8)
> +#endif
> .Lentry_SYSCALL_64_after_fastpath_call:
>
> movq %rax, RAX(%rsp)
> @@ -438,7 +444,7 @@ ENTRY(stub_ptregs_64)
> jmp entry_SYSCALL64_slow_path
>
> 1:
> - jmp *%rax /* Called from C */
> + JMP_NOSPEC %rax /* Called from C */
> END(stub_ptregs_64)
>
> .macro ptregs_stub func
> @@ -517,7 +523,7 @@ ENTRY(ret_from_fork)
> 1:
> /* kernel thread */
> movq %r12, %rdi
> - call *%rbx
> + CALL_NOSPEC %rbx
> /*
> * A kernel thread is allowed to return here after successfully
> * calling do_execve(). Exit to userspace to complete the execve()
View attachment "dmesg" of type "text/plain" (28905 bytes)
View attachment "config" of type "text/plain" (95662 bytes)
Powered by blists - more mailing lists