Message-ID: <9692c93d-1482-4750-a8fc-0ff060028675@I-love.SAKURA.ne.jp>
Date: Thu, 7 Mar 2024 07:08:13 +0900
From: Tetsuo Handa <penguin-kernel@...ove.SAKURA.ne.jp>
To: Linus Torvalds <torvalds@...ux-foundation.org>,
        Alexander Potapenko <glider@...gle.com>,
        Marco Elver <elver@...gle.com>, Dmitry Vyukov <dvyukov@...gle.com>,
        kasan-dev <kasan-dev@...glegroups.com>
Cc: LKML <linux-kernel@...r.kernel.org>,
        the arch/x86 maintainers <x86@...nel.org>,
        Thomas Gleixner <tglx@...utronix.de>, Ingo Molnar <mingo@...hat.com>,
        Borislav Petkov <bp@...en8.de>,
        Dave Hansen <dave.hansen@...ux.intel.com>,
        "H. Peter Anvin" <hpa@...or.com>
Subject: Re: [PATCH v2] x86: disable non-instrumented version of copy_mc when
 KMSAN is enabled

Thank you for the explanation.

On 2024/03/06 2:57, Linus Torvalds wrote:
> I think the KMSAN people need to tell us how to tell kmsan that it's a
> memcpy (and about the "I'm going to touch this part of memory", needed
> for the "copy_mc_to_user" side).
> 
> So somebody needs to abstract out that
> 
>         depot_stack_handle_t origin;
> 
>         if (!kmsan_enabled || kmsan_in_runtime())
>                 return;
> 
>         kmsan_enter_runtime();
>         /* Using memmove instead of memcpy doesn't affect correctness. */
>         kmsan_internal_memmove_metadata(dst, (void *)src, n);
>         kmsan_leave_runtime();
> 
>         set_retval_metadata(shadow, origin);
> 
> kind of thing, and expose it as a helper function for "I did something
> that looks like a memory copy", the same way that we currently have
> kmsan_copy_page_meta()

Something like the patch below? Can we assume that 0 <= ret <= len always holds?
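
The patch assumes the usual uaccess convention that the return value is the
number of bytes *not* copied, which is why len - ret bytes of metadata are
propagated. If that invariant cannot be relied on, a defensive clamp along
these lines (just a sketch of the assumption, not part of the patch below)
would keep the metadata copy in bounds:

	if (WARN_ON_ONCE(ret > len))	/* should not happen if callees follow the convention */
		ret = len;
	kmsan_memmove(dst, src, len - ret);	/* metadata for the successfully copied prefix only */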

diff --git a/arch/x86/lib/copy_mc.c b/arch/x86/lib/copy_mc.c
index 6e8b7e600def..6858f80fc9a2 100644
--- a/arch/x86/lib/copy_mc.c
+++ b/arch/x86/lib/copy_mc.c
@@ -61,12 +61,18 @@ unsigned long copy_mc_enhanced_fast_string(void *dst, const void *src, unsigned
  */
 unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, unsigned len)
 {
-	if (copy_mc_fragile_enabled)
-		return copy_mc_fragile(dst, src, len);
-	if (static_cpu_has(X86_FEATURE_ERMS))
-		return copy_mc_enhanced_fast_string(dst, src, len);
-	memcpy(dst, src, len);
-	return 0;
+	unsigned long ret;
+
+	if (copy_mc_fragile_enabled) {
+		ret = copy_mc_fragile(dst, src, len);
+	} else if (static_cpu_has(X86_FEATURE_ERMS)) {
+		ret = copy_mc_enhanced_fast_string(dst, src, len);
+	} else {
+		memcpy(dst, src, len);
+		ret = 0;
+	}
+	kmsan_memmove(dst, src, len - ret);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(copy_mc_to_kernel);
 
@@ -78,15 +84,13 @@ unsigned long __must_check copy_mc_to_user(void __user *dst, const void *src, un
 		__uaccess_begin();
 		ret = copy_mc_fragile((__force void *)dst, src, len);
 		__uaccess_end();
-		return ret;
-	}
-
-	if (static_cpu_has(X86_FEATURE_ERMS)) {
+	} else if (static_cpu_has(X86_FEATURE_ERMS)) {
 		__uaccess_begin();
 		ret = copy_mc_enhanced_fast_string((__force void *)dst, src, len);
 		__uaccess_end();
-		return ret;
+	} else {
+		ret = copy_user_generic((__force void *)dst, src, len);
 	}
-
-	return copy_user_generic((__force void *)dst, src, len);
+	kmsan_copy_to_user(dst, src, len, ret);
+	return ret;
 }
diff --git a/include/linux/kmsan-checks.h b/include/linux/kmsan-checks.h
index c4cae333deec..4c2a614dab2d 100644
--- a/include/linux/kmsan-checks.h
+++ b/include/linux/kmsan-checks.h
@@ -61,6 +61,17 @@ void kmsan_check_memory(const void *address, size_t size);
 void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy,
 			size_t left);
 
+/**
+ * kmsan_memmove() - Notify KMSAN about a data copy within kernel.
+ * @to:   destination address in the kernel.
+ * @from: source address in the kernel.
+ * @size: number of bytes to copy.
+ *
+ * Invoked after a non-instrumented version of memmove()/memcpy() (e.g. one
+ * implemented in assembly code) is called, in order to copy KMSAN's metadata.
+ */
+void kmsan_memmove(void *to, const void *from, size_t size);
+
 #else
 
 static inline void kmsan_poison_memory(const void *address, size_t size,
@@ -77,6 +88,9 @@ static inline void kmsan_copy_to_user(void __user *to, const void *from,
 				      size_t to_copy, size_t left)
 {
 }
+static inline void kmsan_memmove(void *to, const void *from, size_t size)
+{
+}
 
 #endif
 
diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c
index 5d6e2dee5692..364f778ee226 100644
--- a/mm/kmsan/hooks.c
+++ b/mm/kmsan/hooks.c
@@ -285,6 +285,17 @@ void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy,
 }
 EXPORT_SYMBOL(kmsan_copy_to_user);
 
+void kmsan_memmove(void *to, const void *from, size_t size)
+{
+	if (!kmsan_enabled || kmsan_in_runtime())
+		return;
+
+	kmsan_enter_runtime();
+	kmsan_internal_memmove_metadata(to, (void *)from, size);
+	kmsan_leave_runtime();
+}
+EXPORT_SYMBOL(kmsan_memmove);
+
 /* Helper function to check an URB. */
 void kmsan_handle_urb(const struct urb *urb, bool is_out)
 {
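
For illustration, a typical caller of the new helper would look like the
sketch below; arch_fast_copy() is a made-up stand-in for any copy routine
that KMSAN's compiler instrumentation cannot see:

	#include <linux/kmsan-checks.h>

	static void copy_buf(void *dst, const void *src, size_t n)
	{
		arch_fast_copy(dst, src, n);	/* hypothetical non-instrumented copy */
		kmsan_memmove(dst, src, n);	/* keep KMSAN's shadow/origin metadata in sync */
	}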

