Message-ID: <151632013524.21271.4516878993060956547.stgit@dwillia2-desk3.amr.corp.intel.com>
Date: Thu, 18 Jan 2018 16:02:15 -0800
From: Dan Williams <dan.j.williams@...el.com>
To: linux-kernel@...r.kernel.org
Cc: linux-arch@...r.kernel.org, Kees Cook <keescook@...omium.org>,
kernel-hardening@...ts.openwall.com, gregkh@...uxfoundation.org,
x86@...nel.org, Ingo Molnar <mingo@...hat.com>,
Al Viro <viro@...iv.linux.org.uk>,
"H. Peter Anvin" <hpa@...or.com>, tglx@...utronix.de,
torvalds@...ux-foundation.org, akpm@...ux-foundation.org,
alan@...ux.intel.com
Subject: [PATCH v4 06/10] x86, get_user: use pointer masking to limit speculation

Quoting Linus:

    I do think that it would be a good idea to very expressly document
    the fact that it's not that the user access itself is unsafe. I do
    agree that things like "get_user()" want to be protected, but not
    because of any direct bugs or problems with get_user() and friends,
    but simply because get_user() is an excellent source of a pointer
    that is obviously controlled from a potentially attacking user
    space. So it's a prime candidate for then finding _subsequent_
    accesses that can then be used to perturb the cache.
Unlike the '__get_user' case, 'get_user' includes the address limit
check near the pointer dereference. With that locality the speculation
can be mitigated with pointer narrowing rather than a barrier. The
narrowing is performed by:

    cmp %limit, %ptr
    sbb %mask, %mask
    and %mask, %ptr

With respect to speculation, the value of %ptr is then either less
than %limit or NULL.
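
Spelled out, and taking the 64-bit register assignments for
concreteness (%_ASM_AX/%_ASM_DX resolve to %rax/%rdx, with %rax
holding the user pointer), the sequence in the get_user path behaves
as follows; the comments are illustrative:

    cmp TASK_addr_limit(%rdx), %rax  # computes %rax - limit; CF=1 iff ptr < limit
    jae bad_get_user                 # architecturally taken when ptr >= limit
    sbb %rdx, %rdx                   # %rdx = -CF: all-ones if ptr is valid, 0 if not
    and %rdx, %rax                   # valid ptr passes through; otherwise forced to 0

CF is a data dependency of the 'cmp', not of the branch, so even when
the cpu speculates past the 'jae' with an out-of-range pointer the
'sbb' still sees CF=0 and the subsequent access targets NULL rather
than kernel memory.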
Co-developed-by: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Al Viro <viro@...iv.linux.org.uk>
Cc: Kees Cook <keescook@...omium.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: "H. Peter Anvin" <hpa@...or.com>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: x86@...nel.org
Signed-off-by: Dan Williams <dan.j.williams@...el.com>
---
arch/x86/include/asm/smap.h | 17 +++++++++++++++++
arch/x86/lib/getuser.S | 5 +++++
2 files changed, 22 insertions(+)
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
index db333300bd4b..2b4ad4c6a226 100644
--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -25,6 +25,23 @@
#include <asm/alternative-asm.h>
+/*
+ * MASK_NOSPEC - sanitize the value of a user controlled pointer with
+ * respect to speculation
+ *
+ * In the get_user path, once we have determined that the pointer is
+ * below the current address limit, sanitize its value with respect to
+ * speculation. In the case where the pointer is above the address
+ * limit this directs the cpu to speculate with a NULL ptr rather than
+ * something targeting kernel memory.
+ *
+ * assumes CF is set from a previous 'cmp TASK_addr_limit, %ptr'
+ */
+.macro MASK_NOSPEC mask val
+	sbb \mask, \mask
+	and \mask, \val
+.endm
+
#ifdef CONFIG_X86_SMAP
#define ASM_CLAC \
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index c97d935a29e8..07d0e8a28b17 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -40,6 +40,7 @@ ENTRY(__get_user_1)
mov PER_CPU_VAR(current_task), %_ASM_DX
cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user
+ MASK_NOSPEC %_ASM_DX, %_ASM_AX
ASM_STAC
1: movzbl (%_ASM_AX),%edx
xor %eax,%eax
@@ -54,6 +55,7 @@ ENTRY(__get_user_2)
mov PER_CPU_VAR(current_task), %_ASM_DX
cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user
+ MASK_NOSPEC %_ASM_DX, %_ASM_AX
ASM_STAC
2: movzwl -1(%_ASM_AX),%edx
xor %eax,%eax
@@ -68,6 +70,7 @@ ENTRY(__get_user_4)
mov PER_CPU_VAR(current_task), %_ASM_DX
cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user
+ MASK_NOSPEC %_ASM_DX, %_ASM_AX
ASM_STAC
3: movl -3(%_ASM_AX),%edx
xor %eax,%eax
@@ -83,6 +86,7 @@ ENTRY(__get_user_8)
mov PER_CPU_VAR(current_task), %_ASM_DX
cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user
+ MASK_NOSPEC %_ASM_DX, %_ASM_AX
ASM_STAC
4: movq -7(%_ASM_AX),%rdx
xor %eax,%eax
@@ -94,6 +98,7 @@ ENTRY(__get_user_8)
mov PER_CPU_VAR(current_task), %_ASM_DX
cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user_8
+ MASK_NOSPEC %_ASM_DX, %_ASM_AX
ASM_STAC
4: movl -7(%_ASM_AX),%edx
5: movl -3(%_ASM_AX),%ecx
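
As a rough C analogue of the narrowing that MASK_NOSPEC performs
(illustrative only: 'mask_nospec' and the limit value below are made
up for this sketch, and unlike the asm a compiler is free to
reintroduce a branch here):

    #include <stdint.h>
    #include <stdio.h>

    /* Branchless narrowing: returns ptr when ptr < limit, 0 (NULL) otherwise. */
    static uintptr_t mask_nospec(uintptr_t ptr, uintptr_t limit)
    {
            /* 0UL - 1 wraps to all-ones; mirrors 'sbb %mask, %mask'
             * with CF = (ptr < limit) */
            uintptr_t mask = 0UL - (uintptr_t)(ptr < limit);
            return ptr & mask;      /* 'and %mask, %ptr' */
    }

    int main(void)
    {
            uintptr_t limit = 0x00007ffffffff000UL; /* stand-in address limit */

            /* below the limit: pointer passes through unchanged */
            printf("%#lx\n", (unsigned long)mask_nospec(0x1000UL, limit));
            /* above the limit: pointer is forced to 0 (NULL) */
            printf("%#lx\n", (unsigned long)mask_nospec(~0UL, limit));
            return 0;
    }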