lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Wed, 30 Mar 2022 11:41:56 +0200
From:   Peter Zijlstra <peterz@...radead.org>
To:     Alexander Potapenko <glider@...gle.com>
Cc:     Alexander Viro <viro@...iv.linux.org.uk>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Andrey Konovalov <andreyknvl@...gle.com>,
        Andy Lutomirski <luto@...nel.org>,
        Arnd Bergmann <arnd@...db.de>, Borislav Petkov <bp@...en8.de>,
        Christoph Hellwig <hch@....de>,
        Christoph Lameter <cl@...ux.com>,
        David Rientjes <rientjes@...gle.com>,
        Dmitry Vyukov <dvyukov@...gle.com>,
        Eric Dumazet <edumazet@...gle.com>,
        Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
        Herbert Xu <herbert@...dor.apana.org.au>,
        Ilya Leoshkevich <iii@...ux.ibm.com>,
        Ingo Molnar <mingo@...hat.com>, Jens Axboe <axboe@...nel.dk>,
        Joonsoo Kim <iamjoonsoo.kim@....com>,
        Kees Cook <keescook@...omium.org>,
        Marco Elver <elver@...gle.com>,
        Mark Rutland <mark.rutland@....com>,
        Matthew Wilcox <willy@...radead.org>,
        "Michael S. Tsirkin" <mst@...hat.com>,
        Pekka Enberg <penberg@...nel.org>,
        Petr Mladek <pmladek@...e.com>,
        Steven Rostedt <rostedt@...dmis.org>,
        Thomas Gleixner <tglx@...utronix.de>,
        Vasily Gorbik <gor@...ux.ibm.com>,
        Vegard Nossum <vegard.nossum@...cle.com>,
        Vlastimil Babka <vbabka@...e.cz>, linux-mm@...ck.org,
        linux-arch@...r.kernel.org, linux-kernel@...r.kernel.org,
        ryabinin.a.a@...il.com
Subject: Re: [PATCH v2 13/48] kmsan: add KMSAN runtime core

On Wed, Mar 30, 2022 at 10:58:26AM +0200, Peter Zijlstra wrote:
> On Tue, Mar 29, 2022 at 02:39:42PM +0200, Alexander Potapenko wrote:
> > +/* Handle llvm.memmove intrinsic. */
> > +void *__msan_memmove(void *dst, const void *src, uintptr_t n)
> > +{
> > +	void *result;
> > +
> > +	result = __memmove(dst, src, n);
> > +	if (!n)
> > +		/* Some people call memmove() with zero length. */
> > +		return result;
> > +	if (!kmsan_enabled || kmsan_in_runtime())
> > +		return result;
> > +
> > +	kmsan_internal_memmove_metadata(dst, (void *)src, n);
> > +
> > +	return result;
> > +}
> > +EXPORT_SYMBOL(__msan_memmove);
> > +
> > +/* Handle llvm.memcpy intrinsic. */
> > +void *__msan_memcpy(void *dst, const void *src, uintptr_t n)
> > +{
> > +	void *result;
> > +
> > +	result = __memcpy(dst, src, n);
> > +	if (!n)
> > +		/* Some people call memcpy() with zero length. */
> > +		return result;
> > +
> > +	if (!kmsan_enabled || kmsan_in_runtime())
> > +		return result;
> > +
> > +	/* Using memmove instead of memcpy doesn't affect correctness. */
> > +	kmsan_internal_memmove_metadata(dst, (void *)src, n);
> > +
> > +	return result;
> > +}
> > +EXPORT_SYMBOL(__msan_memcpy);
> > +
> > +/* Handle llvm.memset intrinsic. */
> > +void *__msan_memset(void *dst, int c, uintptr_t n)
> > +{
> > +	void *result;
> > +
> > +	result = __memset(dst, c, n);
> > +	if (!kmsan_enabled || kmsan_in_runtime())
> > +		return result;
> > +
> > +	kmsan_enter_runtime();
> > +	/*
> > +	 * Clang doesn't pass parameter metadata here, so it is impossible to
> > +	 * use shadow of @c to set up the shadow for @dst.
> > +	 */
> > +	kmsan_internal_unpoison_memory(dst, n, /*checked*/ false);
> > +	kmsan_leave_runtime();
> > +
> > +	return result;
> > +}
> > +EXPORT_SYMBOL(__msan_memset);
> 
> This — we need this same thing for KASAN. KASAN must be changed to have the
> mem*() intrinsics emit __asan_mem*(), such that we can have
> uninstrumented base functions.
> 
> Currently we seem to have the problem that when a noinstr function trips
> one of those intrinsics it'll emit a call to an instrumented function,
> which is a complete no-no.
> 
> Also see:
> 
>   https://lore.kernel.org/all/YjxTt3pFIcV3lt8I@zn.tnic/T/#m2049a14be400d4ae2b54a1f7da3ede28b7fd7564
> 
> Given the helpful feedback there, Mark and I are going to unilaterally
> break KASAN by deleting the existing wrappers.

specifically, I was thinking of something like the below...

(potentially more architectures are affected)

---
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 23048be0333b..909ffbce8438 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -149,6 +149,7 @@ config ARM64
 	select HAVE_ARCH_JUMP_LABEL_RELATIVE
 	select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
 	select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
+	select HAVE_ARCH_KASAN_NOINSTR if HAVE_ARCH_KASAN
 	select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN
 	select HAVE_ARCH_KASAN_HW_TAGS if (HAVE_ARCH_KASAN && ARM64_MTE)
 	# Some instrumentation may be unsound, hence EXPERT
diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S
index 4ab48d49c451..9f6ed674e420 100644
--- a/arch/arm64/lib/memcpy.S
+++ b/arch/arm64/lib/memcpy.S
@@ -242,12 +242,12 @@ SYM_FUNC_END(__pi_memcpy)
 
 SYM_FUNC_ALIAS(__memcpy, __pi_memcpy)
 EXPORT_SYMBOL(__memcpy)
-SYM_FUNC_ALIAS_WEAK(memcpy, __memcpy)
+SYM_FUNC_ALIAS(memcpy, __memcpy)
 EXPORT_SYMBOL(memcpy)
 
 SYM_FUNC_ALIAS(__pi_memmove, __pi_memcpy)
 
 SYM_FUNC_ALIAS(__memmove, __pi_memmove)
 EXPORT_SYMBOL(__memmove)
-SYM_FUNC_ALIAS_WEAK(memmove, __memmove)
+SYM_FUNC_ALIAS(memmove, __memmove)
 EXPORT_SYMBOL(memmove)
diff --git a/arch/arm64/lib/memset.S b/arch/arm64/lib/memset.S
index a5aebe82ad73..c41ae56ce6a8 100644
--- a/arch/arm64/lib/memset.S
+++ b/arch/arm64/lib/memset.S
@@ -206,5 +206,5 @@ SYM_FUNC_END(__pi_memset)
 SYM_FUNC_ALIAS(__memset, __pi_memset)
 EXPORT_SYMBOL(__memset)
 
-SYM_FUNC_ALIAS_WEAK(memset, __pi_memset)
+SYM_FUNC_ALIAS(memset, __pi_memset)
 EXPORT_SYMBOL(memset)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 7340d9f01b62..a89881ad0568 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -166,6 +166,7 @@ config X86
 	select HAVE_ARCH_JUMP_LABEL_RELATIVE
 	select HAVE_ARCH_KASAN			if X86_64
 	select HAVE_ARCH_KASAN_VMALLOC		if X86_64
+	select HAVE_ARCH_KASAN_NOINSTR		if X86_64
 	select HAVE_ARCH_KFENCE
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_MMAP_RND_BITS		if MMU
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index d0d7b9bc6cad..d5e1a2d4a41a 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -42,7 +42,7 @@ SYM_FUNC_START(__memcpy)
 SYM_FUNC_END(__memcpy)
 EXPORT_SYMBOL(__memcpy)
 
-SYM_FUNC_ALIAS_WEAK(memcpy, __memcpy)
+SYM_FUNC_ALIAS(memcpy, __memcpy)
 EXPORT_SYMBOL(memcpy)
 
 /*
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index d83cba364e31..a13711b645fb 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -208,5 +208,5 @@ SYM_FUNC_START(__memmove)
 SYM_FUNC_END(__memmove)
 EXPORT_SYMBOL(__memmove)
 
-SYM_FUNC_ALIAS_WEAK(memmove, __memmove)
+SYM_FUNC_ALIAS(memmove, __memmove)
 EXPORT_SYMBOL(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index fc9ffd3ff3b2..29299a926962 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -43,7 +43,7 @@ SYM_FUNC_START(__memset)
 SYM_FUNC_END(__memset)
 EXPORT_SYMBOL(__memset)
 
-SYM_FUNC_ALIAS_WEAK(memset, __memset)
+SYM_FUNC_ALIAS(memset, __memset)
 EXPORT_SYMBOL(memset)
 
 /*
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 1f3e620188a2..7d4815bfa9ae 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -12,6 +12,9 @@ config HAVE_ARCH_KASAN_HW_TAGS
 config HAVE_ARCH_KASAN_VMALLOC
 	bool
 
+config HAVE_ARCH_KASAN_NOINSTR
+	bool
+
 config ARCH_DISABLE_KASAN_INLINE
 	bool
 	help
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index a4f07de21771..6fd542061625 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -38,6 +38,44 @@ bool __kasan_check_write(const volatile void *p, unsigned int size)
 }
 EXPORT_SYMBOL(__kasan_check_write);
 
+/*
+ * noinstr archs require uninstrumented base functions, as such their kasan
+ * implementation must use __asan_mem*() functions if they want
+ * instrumentation.
+ */
+#ifdef HAVE_ARCH_KASAN_NOINSTR
+
+void *__asan_memset(void *addr, int c, size_t len)
+{
+	if (!kasan_check_range((unsigned long)addr, len, true, _RET_IP_))
+		return NULL;
+
+	return __memset(addr, c, len);
+}
+
+#ifdef __HAVE_ARCH_MEMMOVE
+#undef memmove
+void *__asan_memmove(void *dest, const void *src, size_t len)
+{
+	if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) ||
+	    !kasan_check_range((unsigned long)dest, len, true, _RET_IP_))
+		return NULL;
+
+	return __memmove(dest, src, len);
+}
+#endif
+
+#undef memcpy
+void *__asan_memcpy(void *dest, const void *src, size_t len)
+{
+	if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) ||
+	    !kasan_check_range((unsigned long)dest, len, true, _RET_IP_))
+		return NULL;
+
+	return __memcpy(dest, src, len);
+}
+#else
+
 #undef memset
 void *memset(void *addr, int c, size_t len)
 {
@@ -69,6 +107,8 @@ void *memcpy(void *dest, const void *src, size_t len)
 	return __memcpy(dest, src, len);
 }
 
+#endif
+
 void kasan_poison(const void *addr, size_t size, u8 value, bool init)
 {
 	void *shadow_start, *shadow_end;

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ