Message-ID: <20251128111516.244497-1-jiayuan.chen@linux.dev>
Date: Fri, 28 Nov 2025 19:15:14 +0800
From: Jiayuan Chen <jiayuan.chen@...ux.dev>
To: linux-mm@...ck.org
Cc: Jiayuan Chen <jiayuan.chen@...ux.dev>,
	syzbot+997752115a851cb0cf36@...kaller.appspotmail.com,
	Andrey Ryabinin <ryabinin.a.a@...il.com>,
	Alexander Potapenko <glider@...gle.com>,
	Andrey Konovalov <andreyknvl@...il.com>,
	Dmitry Vyukov <dvyukov@...gle.com>,
	Vincenzo Frascino <vincenzo.frascino@....com>,
	Andrew Morton <akpm@...ux-foundation.org>,
	Uladzislau Rezki <urezki@...il.com>,
	Danilo Krummrich <dakr@...nel.org>,
	Kees Cook <kees@...nel.org>,
	kasan-dev@...glegroups.com,
	linux-kernel@...r.kernel.org
Subject: [PATCH v1] mm/kasan: Fix incorrect unpoisoning in vrealloc for KASAN

Syzkaller reported an out-of-bounds memory access bug [1]. Fix two
issues:

1. In vrealloc, we were missing the KASAN_VMALLOC_VM_ALLOC flag when
   unpoisoning the extended region. This flag is required to correctly
   associate the allocation with KASAN's vmalloc tracking.

   Note: In contrast, vzalloc (via __vmalloc_node_range_noprof) explicitly
   sets KASAN_VMALLOC_VM_ALLOC and passes it to kasan_unpoison_vmalloc().
   vrealloc must behave consistently, especially when reusing existing
   vmalloc regions, so that KASAN can track allocations correctly.

2. When vrealloc reuses an existing vmalloc region (without allocating new
   pages), KASAN previously generated a new tag, which broke tag-based
   memory access tracking. Add a 'reuse_tag' parameter to
   __kasan_unpoison_vmalloc() to preserve the original tag in such cases
   (illustrated in the sketch below).

Introduce a new helper, kasan_unpoison_vrealloc(), to handle this reuse
scenario and keep tag behavior consistent across reallocation.
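
For illustration only (not part of the change itself): below is a minimal
userspace sketch of the top-byte pointer tagging used by the tag-based
KASAN modes. The set_tag()/get_tag() helpers here are simplified stand-ins
for the kernel helpers of the same name; the point is that an in-place
reallocation reuses the tag already attached to the pointer instead of
drawing a fresh random one.

  #include <stdint.h>
  #include <stdio.h>

  /* The tag lives in the top byte of a 64-bit pointer. */
  #define TAG_SHIFT 56

  static void *set_tag(const void *addr, uint8_t tag)
  {
      uintptr_t a = (uintptr_t)addr;

      a &= ~((uintptr_t)0xff << TAG_SHIFT);
      a |= (uintptr_t)tag << TAG_SHIFT;
      return (void *)a;
  }

  static uint8_t get_tag(const void *addr)
  {
      return (uint8_t)((uintptr_t)addr >> TAG_SHIFT);
  }

  int main(void)
  {
      char buf[16];
      /* Initial allocation: a "random" tag (fixed here) is assigned. */
      void *p = set_tag(buf, 0xab);

      /* In-place reallocation: reuse the existing tag so pointers
       * handed out earlier stay valid. */
      void *q = set_tag(p, get_tag(p));

      printf("tag preserved: %s\n",
             get_tag(p) == get_tag(q) ? "yes" : "no");
      return 0;
  }

Had the reallocation drawn a fresh random tag instead, get_tag(p) would no
longer match get_tag(q), and any access through the previously returned
tagged pointer would be flagged as a tag mismatch, which is the breakage
described in issue 2 above.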

[1]: https://syzkaller.appspot.com/bug?extid=997752115a851cb0cf36

Fixes: a0309faf1cb0 ("mm: vmalloc: support more granular vrealloc() sizing")
Reported-by: syzbot+997752115a851cb0cf36@...kaller.appspotmail.com
Closes: https://lore.kernel.org/all/68e243a2.050a0220.1696c6.007d.GAE@google.com/T/

Signed-off-by: Jiayuan Chen <jiayuan.chen@...ux.dev>
---
 include/linux/kasan.h | 21 +++++++++++++++++++--
 mm/kasan/hw_tags.c    |  4 ++--
 mm/kasan/shadow.c     |  6 ++++--
 mm/vmalloc.c          |  4 ++--
 4 files changed, 27 insertions(+), 8 deletions(-)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index f335c1d7b61d..14e59e898c29 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -612,13 +612,23 @@ static inline void kasan_release_vmalloc(unsigned long start,
 #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
 
 void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
-			       kasan_vmalloc_flags_t flags);
+			       kasan_vmalloc_flags_t flags, bool reuse_tag);
+
+static __always_inline void *kasan_unpoison_vrealloc(const void *start,
+						     unsigned long size,
+						     kasan_vmalloc_flags_t flags)
+{
+	if (kasan_enabled())
+		return __kasan_unpoison_vmalloc(start, size, flags, true);
+	return (void *)start;
+}
+
 static __always_inline void *kasan_unpoison_vmalloc(const void *start,
 						unsigned long size,
 						kasan_vmalloc_flags_t flags)
 {
 	if (kasan_enabled())
-		return __kasan_unpoison_vmalloc(start, size, flags);
+		return __kasan_unpoison_vmalloc(start, size, flags, false);
 	return (void *)start;
 }
 
@@ -645,6 +655,13 @@ static inline void kasan_release_vmalloc(unsigned long start,
 					 unsigned long free_region_end,
 					 unsigned long flags) { }
 
+static inline void *kasan_unpoison_vrealloc(const void *start,
+					    unsigned long size,
+					    kasan_vmalloc_flags_t flags)
+{
+	return (void *)start;
+}
+
 static inline void *kasan_unpoison_vmalloc(const void *start,
 					   unsigned long size,
 					   kasan_vmalloc_flags_t flags)
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index 1c373cc4b3fa..04a62ac27165 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -317,7 +317,7 @@ static void init_vmalloc_pages(const void *start, unsigned long size)
 }
 
 void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
-				kasan_vmalloc_flags_t flags)
+				kasan_vmalloc_flags_t flags, bool reuse_tag)
 {
 	u8 tag;
 	unsigned long redzone_start, redzone_size;
@@ -361,7 +361,7 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
 		return (void *)start;
 	}
 
-	tag = kasan_random_tag();
+	tag = reuse_tag ? get_tag(start) : kasan_random_tag();
 	start = set_tag(start, tag);
 
 	/* Unpoison and initialize memory up to size. */
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index 29a751a8a08d..354842c7f927 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -611,7 +611,7 @@ void __kasan_release_vmalloc(unsigned long start, unsigned long end,
 }
 
 void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
-			       kasan_vmalloc_flags_t flags)
+			       kasan_vmalloc_flags_t flags, bool reuse_tag)
 {
 	/*
 	 * Software KASAN modes unpoison both VM_ALLOC and non-VM_ALLOC
@@ -631,7 +631,9 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
 	    !(flags & KASAN_VMALLOC_PROT_NORMAL))
 		return (void *)start;
 
-	start = set_tag(start, kasan_random_tag());
+	if (!reuse_tag)
+		start = set_tag(start, kasan_random_tag());
+
 	kasan_unpoison(start, size, false);
 	return (void *)start;
 }
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ecbac900c35f..1ddd6ffc89c1 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -4330,8 +4330,8 @@ void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align
 	 * We already have the bytes available in the allocation; use them.
 	 */
 	if (size <= alloced_size) {
-		kasan_unpoison_vmalloc(p + old_size, size - old_size,
-				       KASAN_VMALLOC_PROT_NORMAL);
+		kasan_unpoison_vrealloc(p, size,
+					KASAN_VMALLOC_PROT_NORMAL | KASAN_VMALLOC_VM_ALLOC);
 		/*
 		 * No need to zero memory here, as unused memory will have
 		 * already been zeroed at initial allocation time or during
-- 
2.43.0

