Message-Id: <20220916135953.1320601-3-keescook@chromium.org>
Date:   Fri, 16 Sep 2022 06:59:55 -0700
From:   Kees Cook <keescook@...omium.org>
To:     Matthew Wilcox <willy@...radead.org>
Cc:     Kees Cook <keescook@...omium.org>,
        Uladzislau Rezki <urezki@...il.com>,
        Yu Zhao <yuzhao@...gle.com>, dev@...-flo.net,
        Peter Zijlstra <peterz@...radead.org>,
        Josh Poimboeuf <jpoimboe@...nel.org>,
        Dave Hansen <dave.hansen@...ux.intel.com>, x86@...nel.org,
        stable@...r.kernel.org, Andrew Morton <akpm@...ux-foundation.org>,
        Ingo Molnar <mingo@...hat.com>, linux-kernel@...r.kernel.org,
        linux-perf-users@...r.kernel.org, linux-mm@...ck.org,
        linux-hardening@...r.kernel.org, linux-arch@...r.kernel.org
Subject: [PATCH 2/3] x86/dumpstack: Inline copy_from_user_nmi()

The check_object_size() helper under CONFIG_HARDENED_USERCOPY is
designed to skip any checks where the length is known at compile time,
as a reasonable heuristic: such copies are likely known-good cases.
However, it can only do this when the copy_*_user() helpers are
themselves inline too.
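
For reference, the heuristic is a minimal compile-time gate, modeled
here on check_object_size() in include/linux/thread_info.h (shortened
for illustration):

	static __always_inline void
	check_object_size(const void *ptr, unsigned long n, bool to_user)
	{
		/* Skip the expensive walk when the compiler can prove
		 * the length is a compile-time constant. */
		if (!__builtin_constant_p(n))
			__check_object_size(ptr, n, to_user);
	}

When copy_from_user_nmi() lives out of line, n arrives as a runtime
function argument, __builtin_constant_p(n) is false, and the expensive
__check_object_size() path (including find_vmap_area()) always runs.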

The check_object_size() helper can call find_vmap_area() when the
destination is in vmap memory, and find_vmap_area() requires taking a
spinlock. If show_regs() is called in interrupt context, it will
attempt a call to copy_from_user_nmi(), which may call
check_object_size() and then find_vmap_area(). If something in normal
context happens to be in the middle of calling find_vmap_area() (with
the spinlock held), the interrupt handler will spin on the already
held lock forever.
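
Sketched out, the lockup looks like this (assuming vmap_area_lock is
the spinlock taken by find_vmap_area() in mm/vmalloc.c):

	task context                   NMI/IRQ on the same CPU
	------------                   -----------------------
	find_vmap_area()
	  spin_lock(&vmap_area_lock)
	                               show_regs()
	                                 copy_from_user_nmi()
	                                   check_object_size()
	                                     find_vmap_area()
	                                       spin_lock(&vmap_area_lock)
	                                       /* spins forever */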

copy_from_user_nmi() is actually being called with a fixed-size
length, so check_object_size() should never have been triggered in
the first place. In order for check_object_size() to see that the
length is a fixed size, inline copy_from_user_nmi(), as is already
done for all the other uaccess helpers.
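
Concretely, with both copy_code() and copy_from_user_nmi() marked
__always_inline, the fixed length propagates down from the call site
(a sketch assuming the show_opcodes()/OPCODE_BUFSIZE call path in
arch/x86/kernel/dumpstack.c):

	u8 opcodes[OPCODE_BUFSIZE];
	...
	/* After inlining, n == OPCODE_BUFSIZE is a compile-time
	 * constant, so check_object_size() elides the vmap lookup. */
	copy_from_user_nmi(opcodes, (void __user *)src, OPCODE_BUFSIZE);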

Reported-by: Yu Zhao <yuzhao@...gle.com>
Link: https://lore.kernel.org/all/CAOUHufaPshtKrTWOz7T7QFYUNVGFm0JBjvM700Nhf9qEL9b3EQ@mail.gmail.com
Reported-by: dev@...-flo.net
Cc: Matthew Wilcox <willy@...radead.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Josh Poimboeuf <jpoimboe@...nel.org>
Cc: Dave Hansen <dave.hansen@...ux.intel.com>
Cc: x86@...nel.org
Fixes: 0aef499f3172 ("mm/usercopy: Detect vmalloc overruns")
Cc: stable@...r.kernel.org
Signed-off-by: Kees Cook <keescook@...omium.org>
---
 arch/x86/include/asm/uaccess.h |  2 --
 arch/x86/kernel/dumpstack.c    |  4 +--
 arch/x86/lib/Makefile          |  2 +-
 arch/x86/lib/usercopy.c        | 50 ----------------------------------
 include/linux/uaccess.h        | 41 ++++++++++++++++++++++++++++
 5 files changed, 44 insertions(+), 55 deletions(-)
 delete mode 100644 arch/x86/lib/usercopy.c

diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index e9390eea861b..f47c0c752e7a 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -498,8 +498,6 @@ struct __large_struct { unsigned long buf[100]; };
 		: : ltype(x), "m" (__m(addr))				\
 		: : label)
 
-extern unsigned long
-copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
 extern __must_check long
 strncpy_from_user(char *dst, const char __user *src, long count);
 
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index afae4dd77495..b59d59ef10d2 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -72,8 +72,8 @@ static void printk_stack_address(unsigned long address, int reliable,
 	printk("%s %s%pBb\n", log_lvl, reliable ? "" : "? ", (void *)address);
 }
 
-static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src,
-		     unsigned int nbytes)
+static __always_inline int
+copy_code(struct pt_regs *regs, u8 *buf, unsigned long src, unsigned int nbytes)
 {
 	if (!user_mode(regs))
 		return copy_from_kernel_nofault(buf, (u8 *)src, nbytes);
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index f76747862bd2..aeb5cd634e27 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -42,7 +42,7 @@ clean-files := inat-tables.c
 obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
 
 lib-y := delay.o misc.o cmdline.o cpu.o
-lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
+lib-y += usercopy_$(BITS).o getuser.o putuser.o
 lib-y += memcpy_$(BITS).o
 lib-y += pc-conf-reg.o
 lib-$(CONFIG_ARCH_HAS_COPY_MC) += copy_mc.o copy_mc_64.o
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
deleted file mode 100644
index 959489f2f814..000000000000
--- a/arch/x86/lib/usercopy.c
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * User address space access functions.
- *
- *  For licencing details see kernel-base/COPYING
- */
-
-#include <linux/uaccess.h>
-#include <linux/export.h>
-
-/**
- * copy_from_user_nmi - NMI safe copy from user
- * @to:		Pointer to the destination buffer
- * @from:	Pointer to a user space address of the current task
- * @n:		Number of bytes to copy
- *
- * Returns: The number of not copied bytes. 0 is success, i.e. all bytes copied
- *
- * Contrary to other copy_from_user() variants this function can be called
- * from NMI context. Despite the name it is not restricted to be called
- * from NMI context. It is safe to be called from any other context as
- * well. It disables pagefaults across the copy which means a fault will
- * abort the copy.
- *
- * For NMI context invocations this relies on the nested NMI work to allow
- * atomic faults from the NMI path; the nested NMI paths are careful to
- * preserve CR2.
- */
-unsigned long
-copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
-{
-	unsigned long ret;
-
-	if (!__access_ok(from, n))
-		return n;
-
-	if (!nmi_uaccess_okay())
-		return n;
-
-	/*
-	 * Even though this function is typically called from NMI/IRQ context
-	 * disable pagefaults so that its behaviour is consistent even when
-	 * called from other contexts.
-	 */
-	pagefault_disable();
-	ret = __copy_from_user_inatomic(to, from, n);
-	pagefault_enable();
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(copy_from_user_nmi);
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 065e121d2a86..fee141ed8f95 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -273,6 +273,47 @@ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
 
 #endif		/* ARCH_HAS_NOCACHE_UACCESS */
 
+/**
+ * copy_from_user_nmi - NMI safe copy from user
+ * @to:		Pointer to the destination buffer
+ * @from:	Pointer to a user space address of the current task
+ * @n:		Number of bytes to copy
+ *
+ * Returns: The number of not copied bytes. 0 is success, i.e. all bytes copied
+ *
+ * Contrary to other copy_from_user() variants this function can be called
+ * from NMI context. Despite the name it is not restricted to be called
+ * from NMI context. It is safe to be called from any other context as
+ * well. It disables pagefaults across the copy which means a fault will
+ * abort the copy.
+ *
+ * For NMI context invocations this relies on the nested NMI work to allow
+ * atomic faults from the NMI path; the nested NMI paths are careful to
+ * preserve CR2.
+ */
+static __always_inline unsigned long
+copy_from_user_nmi(void *to, const void __user *from, const unsigned long n)
+{
+	unsigned long ret;
+
+	if (!__access_ok(from, n))
+		return n;
+
+	if (!nmi_uaccess_okay())
+		return n;
+
+	/*
+	 * Even though this function is typically called from NMI/IRQ context
+	 * disable pagefaults so that its behaviour is consistent even when
+	 * called from other contexts.
+	 */
+	pagefault_disable();
+	ret = __copy_from_user_inatomic(to, from, n);
+	pagefault_enable();
+
+	return ret;
+}
+
 extern __must_check int check_zeroed_user(const void __user *from, size_t size);
 
 /**
-- 
2.34.1
