Message-ID: <0000000000005ee46906122bbf6d@google.com>
Date: Sat, 24 Feb 2024 18:42:43 -0800
From: syzbot <syzbot+d7521c1e3841ed075a42@...kaller.appspotmail.com>
To: linux-kernel@...r.kernel.org
Subject: Re: [syzbot] Re: [syzbot] [virtualization?] KMSAN: uninit-value in
virtqueue_add (4)
For archival purposes, forwarding an incoming command email to
linux-kernel@...r.kernel.org.
***
Subject: Re: [syzbot] [virtualization?] KMSAN: uninit-value in virtqueue_add (4)
Author: penguin-kernel@...ove.sakura.ne.jp
#syz test: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git v6.7
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index cc6b8e087192..f13bba3a9dab 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -58,7 +58,16 @@ static inline void clear_page(void *page)
: "cc", "memory", "rax", "rcx");
}
+#ifdef CONFIG_KMSAN
+/* Using the non-instrumented assembly version confuses KMSAN. */
+void *memcpy(void *to, const void *from, __kernel_size_t len);
+static inline void copy_page(void *to, void *from)
+{
+ memcpy(to, from, PAGE_SIZE);
+}
+#else
void copy_page(void *to, void *from);
+#endif
#ifdef CONFIG_X86_5LEVEL
/*
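
For context, a rough sketch of why rerouting copy_page() through memcpy()
helps (modeled on mm/kmsan/instrumentation.c in v6.7; simplified, not part
of the patch): under KMSAN the compiler directs memcpy() calls to an
instrumented version that copies the shadow (initialized-ness) metadata
along with the data. The assembly copy_page() bypasses this, so the
destination page keeps stale shadow and later reads of it may be flagged
as uninit-value even though the data was copied.

void *__msan_memcpy(void *dst, const void *src, uintptr_t n)
{
	/* Skip metadata handling when disabled or already in the runtime. */
	if (!kmsan_enabled || kmsan_in_runtime())
		return __memcpy(dst, src, n);

	kmsan_enter_runtime();
	/* Copy shadow/origin metadata for [src, src + n) to dst. */
	kmsan_internal_memmove_metadata(dst, (void *)src, n);
	kmsan_leave_runtime();

	/* Perform the actual data copy. */
	return __memcpy(dst, src, n);
}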
diff --git a/arch/x86/lib/copy_mc.c b/arch/x86/lib/copy_mc.c
index 6e8b7e600def..bc701dcbb133 100644
--- a/arch/x86/lib/copy_mc.c
+++ b/arch/x86/lib/copy_mc.c
@@ -61,9 +61,9 @@ unsigned long copy_mc_enhanced_fast_string(void *dst, const void *src, unsigned
*/
unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, unsigned len)
{
- if (copy_mc_fragile_enabled)
+ if (0 && copy_mc_fragile_enabled)
return copy_mc_fragile(dst, src, len);
- if (static_cpu_has(X86_FEATURE_ERMS))
+ if (0 && static_cpu_has(X86_FEATURE_ERMS))
return copy_mc_enhanced_fast_string(dst, src, len);
memcpy(dst, src, len);
return 0;
@@ -74,14 +74,14 @@ unsigned long __must_check copy_mc_to_user(void __user *dst, const void *src, un
{
unsigned long ret;
- if (copy_mc_fragile_enabled) {
+ if (0 && copy_mc_fragile_enabled) {
__uaccess_begin();
ret = copy_mc_fragile((__force void *)dst, src, len);
__uaccess_end();
return ret;
}
- if (static_cpu_has(X86_FEATURE_ERMS)) {
+ if (0 && static_cpu_has(X86_FEATURE_ERMS)) {
__uaccess_begin();
ret = copy_mc_enhanced_fast_string((__force void *)dst, src, len);
__uaccess_end();
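
With both branches forced off, the MC-safe copy helpers fall through to the
plain, KMSAN-instrumented memcpy() path, taking the uninstrumented assembly
routines copy_mc_fragile() and copy_mc_enhanced_fast_string() out of the
picture. Roughly, the kernel variant then behaves like this sketch (a return
value of 0 means no residual bytes):

unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src,
					     unsigned len)
{
	memcpy(dst, src, len);	/* instrumented under KMSAN */
	return 0;		/* all bytes copied */
}

If the KMSAN report disappears with this applied, the warning is a false
positive caused by KMSAN not seeing the stores done by the assembly helpers.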
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index e0aa6b440ca5..039ffa49f324 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -253,11 +253,16 @@ size_t memcpy_from_iter_mc(void *iter_from, size_t progress,
static size_t __copy_from_iter_mc(void *addr, size_t bytes, struct iov_iter *i)
{
+ size_t ret;
+
if (unlikely(i->count < bytes))
bytes = i->count;
if (unlikely(!bytes))
return 0;
- return iterate_bvec(i, bytes, addr, NULL, memcpy_from_iter_mc);
+ ret = iterate_bvec(i, bytes, addr, NULL, memcpy_from_iter_mc);
+ if (bytes != ret)
+ printk("addr=%px bytes=%d ret=%d\n", addr, bytes, ret);
+ return ret;
}
static __always_inline
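
The added printk() catches short copies: iterate_bvec() returns the number
of bytes actually processed, and the walk stops early if a step copies less
than requested. A short copy would leave the tail of addr untouched, i.e.
uninitialized in KMSAN's shadow, which is exactly the symptom reported in
virtqueue_add(). For reference, the per-segment step in v6.7 returns the
bytes it could not copy (sketch of the lib/iov_iter.c helper named above):

static __always_inline
size_t memcpy_from_iter_mc(void *iter_from, size_t progress,
			   size_t len, void *to, void *priv2)
{
	/* copy_mc_to_kernel() returns the number of uncopied bytes. */
	return copy_mc_to_kernel(to + progress, iter_from, len);
}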
diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c
index 5d6e2dee5692..0b09daa188ef 100644
--- a/mm/kmsan/hooks.c
+++ b/mm/kmsan/hooks.c
@@ -359,6 +359,12 @@ void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
}
/* Functions from kmsan-checks.h follow. */
+
+/*
+ * To create an origin, kmsan_poison_memory() unwinds the stack and stores the
+ * trace in the stack depot. This may cause deadlocks if done from within the
+ * KMSAN runtime, therefore we bail out if kmsan_in_runtime().
+ */
void kmsan_poison_memory(const void *address, size_t size, gfp_t flags)
{
if (!kmsan_enabled || kmsan_in_runtime())
@@ -371,47 +377,31 @@ void kmsan_poison_memory(const void *address, size_t size, gfp_t flags)
}
EXPORT_SYMBOL(kmsan_poison_memory);
+/*
+ * Unlike kmsan_poison_memory(), this function can be used from within the KMSAN
+ * runtime, because it does not trigger allocations or call instrumented code.
+ */
void kmsan_unpoison_memory(const void *address, size_t size)
{
unsigned long ua_flags;
- if (!kmsan_enabled || kmsan_in_runtime())
+ if (!kmsan_enabled)
return;
ua_flags = user_access_save();
- kmsan_enter_runtime();
/* The users may want to poison/unpoison random memory. */
kmsan_internal_unpoison_memory((void *)address, size,
KMSAN_POISON_NOCHECK);
- kmsan_leave_runtime();
user_access_restore(ua_flags);
}
EXPORT_SYMBOL(kmsan_unpoison_memory);
/*
- * Version of kmsan_unpoison_memory() that can be called from within the KMSAN
- * runtime.
- *
- * Non-instrumented IRQ entry functions receive struct pt_regs from assembly
- * code. Those regs need to be unpoisoned, otherwise using them will result in
- * false positives.
- * Using kmsan_unpoison_memory() is not an option in entry code, because the
- * return value of in_task() is inconsistent - as a result, certain calls to
- * kmsan_unpoison_memory() are ignored. kmsan_unpoison_entry_regs() ensures that
- * the registers are unpoisoned even if kmsan_in_runtime() is true in the early
- * entry code.
+ * Version of kmsan_unpoison_memory() called from IRQ entry functions.
*/
void kmsan_unpoison_entry_regs(const struct pt_regs *regs)
{
- unsigned long ua_flags;
-
- if (!kmsan_enabled)
- return;
-
- ua_flags = user_access_save();
- kmsan_internal_unpoison_memory((void *)regs, sizeof(*regs),
- KMSAN_POISON_NOCHECK);
- user_access_restore(ua_flags);
+ kmsan_unpoison_memory((void *)regs, sizeof(*regs));
}
void kmsan_check_memory(const void *addr, size_t size)
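
The hooks.c change relies on the observation that unpoisoning only writes
shadow metadata and never allocates, so it is safe even when already inside
the KMSAN runtime; only kmsan_poison_memory() must keep the
kmsan_in_runtime() bailout, because creating an origin stores a stack trace
in the stack depot and may allocate (or deadlock) when re-entered. A
simplified sketch of the reentrancy guard this depends on (modeled on
mm/kmsan/kmsan.h; the real checks also consider hard IRQ and NMI context):

static __always_inline bool kmsan_in_runtime(void)
{
	/* Per-task/per-CPU counter: nonzero while inside the runtime. */
	return kmsan_get_context()->kmsan_in_runtime;
}

static __always_inline void kmsan_enter_runtime(void)
{
	kmsan_get_context()->kmsan_in_runtime++;
}

static __always_inline void kmsan_leave_runtime(void)
{
	kmsan_get_context()->kmsan_in_runtime--;
}

With the guard dropped from kmsan_unpoison_memory(), kmsan_unpoison_entry_regs()
can become the thin wrapper shown above.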