Message-Id: <20180110010328.22163-8-andi@firstfloor.org>
Date:   Tue,  9 Jan 2018 17:03:27 -0800
From:   Andi Kleen <andi@...stfloor.org>
To:     tglx@...utronix.de
Cc:     x86@...nel.org, linux-kernel@...r.kernel.org,
        torvalds@...ux-foundation.org, dwmw@...zon.co.uk, pjt@...gle.com,
        luto@...nel.org, peterz@...radead.org, thomas.lendacky@....com,
        tim.c.chen@...ux.intel.com, gregkh@...ux-foundation.org,
        dave.hansen@...el.com, jikos@...nel.org,
        Andi Kleen <ak@...ux.intel.com>
Subject: [PATCH v1 7/8] x86/entry/clearregs: Add 64-bit stubs to clear unused argument regs

From: Andi Kleen <ak@...ux.intel.com>

The main system call code doesn't know how many arguments each
system call takes, so generate stubs that do the clearing.

Set up macros that generate a stub for each system call in a 64-bit
kernel to clear its unused argument registers. The stubs use the
syscall argument counts added to the syscall tables earlier in
this series.

Each system call now runs through its stub, which clears the
registers not used for input arguments before jumping to the
real system call. The stub also clears RAX.
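
For illustration only (not part of the patch itself): given the
CLEAR_ARGS and gen_arg_stub macros below, the stub generated for a
hypothetical two-argument syscall sys_foo would expand to roughly:

	san_args_sys_foo:			# sys_foo is a made-up example
		xorq	%r9, %r9		# arg6 unused, clear
		xorq	%r8, %r8		# arg5 unused, clear
		xorl	%ecx, %ecx		# arg4 unused, clear
		xorl	%edx, %edx		# arg3 unused, clear
		xor	%eax, %eax		# clear RAX as well
		jmp	sys_foo			# tail-call the real handler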

All __SYSCALL_* users have to be converted atomically. That makes
this a larger patch, but it's difficult to keep the series
git-bisect safe otherwise.

Longer term this setup will also allow getting rid of the system
call table, since the entry point could be computed from the
syscall number with a simple shift. That is not done here yet.
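
As a rough sketch of that future possibility (purely hypothetical,
not implemented in this series): if every san_args_* stub were
padded to a fixed power-of-two size, say 64 bytes, the dispatch
could avoid the table lookup entirely, something like:

	shl	$6, %rax			# nr * 64 (assumed stub size)
	lea	syscall_stub_start(%rip), %rcx	# made-up base symbol
	add	%rcx, %rax
	jmp	*%rax				# jump straight to the stub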

Signed-off-by: Andi Kleen <ak@...ux.intel.com>
---
 arch/x86/entry/calling.h              | 24 ++++++++++++++++++++++++
 arch/x86/entry/entry_64.S             | 15 +++++++++++++++
 arch/x86/entry/syscall_32.c           |  4 ++--
 arch/x86/entry/syscall_64.c           |  5 +++--
 arch/x86/entry/syscalls/syscalltbl.sh | 15 ++++++++-------
 arch/x86/kernel/asm-offsets_32.c      |  2 +-
 arch/x86/kernel/asm-offsets_64.c      |  4 ++--
 arch/x86/um/sys_call_table_32.c       |  4 ++--
 arch/x86/um/sys_call_table_64.c       |  4 ++--
 9 files changed, 59 insertions(+), 18 deletions(-)

diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 9444e7623185..c89a8a8d195c 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -200,6 +200,30 @@ For 32-bit we have the following conventions - kernel is built with
 #endif
 	.endm
 
+	/* Clear unused argument registers */
+
+.macro CLEAR_ARGS num
+	/* we leave EAX around because it has been already checked */
+	.if \num < 6
+	xorq	%r9, %r9	# arg6
+	.endif
+	.if \num < 5
+	xorq	%r8, %r8	# arg5
+	.endif
+	.if \num < 4
+	xorl	%ecx, %ecx	# arg4
+	.endif
+	.if \num < 3
+	xorl	%edx, %edx	# arg3
+	.endif
+	.if \num < 2
+	xorl	%esi, %esi	# arg2
+	.endif
+	.if \num < 1
+	xorl	%edi, %edi	# arg1
+	.endif
+.endm
+
 /*
  * This is a sneaky trick to help the unwinder find pt_regs on the stack.  The
  * frame pointer is replaced with an encoded pointer to pt_regs.  The encoding
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 6ab4c2aaeabb..5b2456a30b17 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1755,6 +1755,21 @@ nmi_restore:
 	iretq
 END(nmi)
 
+/*
+ * Clear all argument registers not used by a system call.
+ */
+
+.macro gen_arg_stub sym, num
+ENTRY(san_args_\sym)
+	CLEAR_ARGS \num
+	xor	  %eax, %eax
+	jmp	   \sym
+END(san_args_\sym)
+.endm
+
+#define __SYSCALL_64(nr, sym, qual, num) gen_arg_stub sym, num
+#include <asm/syscalls_64.h>
+
 ENTRY(ignore_sysret)
 	UNWIND_HINT_EMPTY
 	mov	$-ENOSYS, %eax
diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c
index 95c294963612..b31e5c8b7ba7 100644
--- a/arch/x86/entry/syscall_32.c
+++ b/arch/x86/entry/syscall_32.c
@@ -7,11 +7,11 @@
 #include <asm/asm-offsets.h>
 #include <asm/syscall.h>
 
-#define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
+#define __SYSCALL_I386(nr, sym, qual, num) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
 #include <asm/syscalls_32.h>
 #undef __SYSCALL_I386
 
-#define __SYSCALL_I386(nr, sym, qual) [nr] = sym,
+#define __SYSCALL_I386(nr, sym, qual, num) [nr] = sym,
 
 extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
 
diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c
index ad1ae014f943..963c9c14480f 100644
--- a/arch/x86/entry/syscall_64.c
+++ b/arch/x86/entry/syscall_64.c
@@ -10,11 +10,12 @@
 #define __SYSCALL_64_QUAL_(sym) sym
 #define __SYSCALL_64_QUAL_ptregs(sym) sym
 
-#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long __SYSCALL_64_QUAL_##qual(sym)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
+#define __SYSCALL_64(nr, sym, qual, num) \
+	extern asmlinkage long __SYSCALL_64_QUAL_##qual(san_args_##sym)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
 #include <asm/syscalls_64.h>
 #undef __SYSCALL_64
 
-#define __SYSCALL_64(nr, sym, qual) [nr] = __SYSCALL_64_QUAL_##qual(sym),
+#define __SYSCALL_64(nr, sym, qual, num) [nr] = __SYSCALL_64_QUAL_##qual(san_args_##sym),
 
 extern long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
 
diff --git a/arch/x86/entry/syscalls/syscalltbl.sh b/arch/x86/entry/syscalls/syscalltbl.sh
index bb8a12f32610..79fff684d75e 100644
--- a/arch/x86/entry/syscalls/syscalltbl.sh
+++ b/arch/x86/entry/syscalls/syscalltbl.sh
@@ -18,7 +18,7 @@ syscall_macro() {
         qualifier=${entry#*/}
     fi
 
-    echo "__SYSCALL_${abi}($nr, $real_entry, $qualifier)"
+    echo "__SYSCALL_${abi}($nr, $real_entry, $qualifier, $num)"
 }
 
 emit() {
@@ -26,6 +26,7 @@ emit() {
     nr="$2"
     entry="$3"
     compat="$4"
+    num="$5"
 
     if [ "$abi" = "64" -a -n "$compat" ]; then
 	echo "a compat entry for a 64-bit syscall makes no sense" >&2
@@ -34,15 +35,15 @@ emit() {
 
     if [ -z "$compat" ]; then
 	if [ -n "$entry" ]; then
-	    syscall_macro "$abi" "$nr" "$entry"
+	    syscall_macro "$abi" "$nr" "$entry" "$num"
 	fi
     else
 	echo "#ifdef CONFIG_X86_32"
 	if [ -n "$entry" ]; then
-	    syscall_macro "$abi" "$nr" "$entry"
+	    syscall_macro "$abi" "$nr" "$entry" "$num"
 	fi
 	echo "#else"
-	syscall_macro "$abi" "$nr" "$compat"
+	syscall_macro "$abi" "$nr" "$compat" "$num"
 	echo "#endif"
     fi
 }
@@ -58,14 +59,14 @@ grep '^[0-9]' "$in" | sort -n | (
 	    # COMMON is the same as 64, except that we don't expect X32
 	    # programs to use it.  Our expectation has nothing to do with
 	    # any generated code, so treat them the same.
-	    emit 64 "$nr" "$entry" "$compat"
+	    emit 64 "$nr" "$entry" "$compat" "$num"
 	elif [ "$abi" = "X32" ]; then
 	    # X32 is equivalent to 64 on an X32-compatible kernel.
 	    echo "#ifdef CONFIG_X86_X32_ABI"
-	    emit 64 "$nr" "$entry" "$compat"
+	    emit 64 "$nr" "$entry" "$compat" "$num"
 	    echo "#endif"
 	elif [ "$abi" = "I386" ]; then
-	    emit "$abi" "$nr" "$entry" "$compat"
+	    emit "$abi" "$nr" "$entry" "$compat" "$num"
 	else
 	    echo "Unknown abi $abi" >&2
 	    exit 1
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index fa1261eefa16..13c7478bfe57 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -5,7 +5,7 @@
 
 #include <asm/ucontext.h>
 
-#define __SYSCALL_I386(nr, sym, qual) [nr] = 1,
+#define __SYSCALL_I386(nr, sym, qual, num) [nr] = 1,
 static char syscalls[] = {
 #include <asm/syscalls_32.h>
 };
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index bf51e51d808d..75d92b53240d 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -5,11 +5,11 @@
 
 #include <asm/ia32.h>
 
-#define __SYSCALL_64(nr, sym, qual) [nr] = 1,
+#define __SYSCALL_64(nr, sym, qual, num) [nr] = 1,
 static char syscalls_64[] = {
 #include <asm/syscalls_64.h>
 };
-#define __SYSCALL_I386(nr, sym, qual) [nr] = 1,
+#define __SYSCALL_I386(nr, sym, qual, num) [nr] = 1,
 static char syscalls_ia32[] = {
 #include <asm/syscalls_32.h>
 };
diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c
index 9649b5ad2ca2..50002d938cef 100644
--- a/arch/x86/um/sys_call_table_32.c
+++ b/arch/x86/um/sys_call_table_32.c
@@ -26,11 +26,11 @@
 
 #define old_mmap sys_old_mmap
 
-#define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
+#define __SYSCALL_I386(nr, sym, qual, num) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
 #include <asm/syscalls_32.h>
 
 #undef __SYSCALL_I386
-#define __SYSCALL_I386(nr, sym, qual) [ nr ] = sym,
+#define __SYSCALL_I386(nr, sym, qual, num) [ nr ] = sym,
 
 extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
 
diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c
index c8bc7fb8cbd6..c39c5b3b8022 100644
--- a/arch/x86/um/sys_call_table_64.c
+++ b/arch/x86/um/sys_call_table_64.c
@@ -36,11 +36,11 @@
 #define stub_execveat sys_execveat
 #define stub_rt_sigreturn sys_rt_sigreturn
 
-#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
+#define __SYSCALL_64(nr, sym, qual, num) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
 #include <asm/syscalls_64.h>
 
 #undef __SYSCALL_64
-#define __SYSCALL_64(nr, sym, qual) [ nr ] = sym,
+#define __SYSCALL_64(nr, sym, qual, num) [ nr ] = sym,
 
 extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
 
-- 
2.14.3
