Message-ID: <20260113191516.31015-1-ryabinin.a.a@gmail.com>
Date: Tue, 13 Jan 2026 20:15:15 +0100
From: Andrey Ryabinin <ryabinin.a.a@...il.com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: Maciej Żenczykowski <maze@...gle.com>,
Maciej Wieczor-Retman <m.wieczorretman@...me>,
Alexander Potapenko <glider@...gle.com>,
Andrey Konovalov <andreyknvl@...il.com>,
Dmitry Vyukov <dvyukov@...gle.com>,
Vincenzo Frascino <vincenzo.frascino@....com>,
kasan-dev@...glegroups.com,
Uladzislau Rezki <urezki@...il.com>,
linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
Andrey Ryabinin <ryabinin.a.a@...il.com>,
joonki.min@...sung-slsi.corp-partner.google.com,
stable@...r.kernel.org
Subject: [PATCH 1/2] mm/kasan: Fix KASAN poisoning in vrealloc()
A KASAN warning can be triggered when vrealloc() changes the requested
size to a value that is not aligned to KASAN_GRANULE_SIZE.
------------[ cut here ]------------
WARNING: CPU: 2 PID: 1 at mm/kasan/shadow.c:174 kasan_unpoison+0x40/0x48
...
pc : kasan_unpoison+0x40/0x48
lr : __kasan_unpoison_vmalloc+0x40/0x68
Call trace:
kasan_unpoison+0x40/0x48 (P)
vrealloc_node_align_noprof+0x200/0x320
bpf_patch_insn_data+0x90/0x2f0
convert_ctx_accesses+0x8c0/0x1158
bpf_check+0x1488/0x1900
bpf_prog_load+0xd20/0x1258
__sys_bpf+0x96c/0xdf0
__arm64_sys_bpf+0x50/0xa0
invoke_syscall+0x90/0x160
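The warning comes from the grow path: before this patch, vrealloc()
unpoisoned the grown tail starting at p + old_size (the call removed
below), and when the previously requested size is not a multiple of
KASAN_GRANULE_SIZE that start address is not granule-aligned. A rough
sketch of the failing sequence, assuming the usual granule-alignment
check in kasan_unpoison():

	/* Old grow path (removed by this patch): */
	kasan_unpoison_vmalloc(p + old_size, size - old_size,
			       KASAN_VMALLOC_PROT_NORMAL |
			       KASAN_VMALLOC_VM_ALLOC |
			       KASAN_VMALLOC_KEEP_TAG);

	/* ... which ends up in kasan_unpoison(), roughly: */
	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;		/* mm/kasan/shadow.c:174 above */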
Introduce a dedicated kasan_vrealloc() helper that centralizes
KASAN handling for vmalloc reallocations. The helper accounts for KASAN
granule alignment when growing or shrinking an allocation and ensures
that partial granules are handled correctly.
Use this helper from vrealloc_node_align_noprof(), in both the shrink
and grow paths, to fix the poisoning logic.
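For example (a sketch, assuming KASAN_GRANULE_SIZE == 8; the sizes are
made up for illustration):

	/* Shrink from old_size = 48 to new_size = 20: */
	kasan_vrealloc(p, 48, 20);
	/*
	 * In generic mode, kasan_poison_last_granule(p, 20) poisons
	 * bytes 20..23 of the partial granule [16, 24); then
	 * round_up(20, 8) = 24 and round_up(48, 8) = 48, so
	 * __kasan_poison_vmalloc(p + 24, 24) poisons [24, 48).
	 */

	/* Grow back from old_size = 20 to new_size = 48: */
	kasan_vrealloc(p, 20, 48);
	/*
	 * round_down(20, 8) = 16, so __kasan_unpoison_vmalloc(p + 16, 32, ...)
	 * starts at a granule-aligned address and unpoisons [16, 48),
	 * instead of starting at the unaligned p + 20 as before.
	 */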
Reported-by: Maciej Żenczykowski <maze@...gle.com>
Reported-by: <joonki.min@...sung-slsi.corp-partner.google.com>
Closes: https://lkml.kernel.org/r/CANP3RGeuRW53vukDy7WDO3FiVgu34-xVJYkfpm08oLO3odYFrA@mail.gmail.com
Fixes: d699440f58ce ("mm: fix vrealloc()'s KASAN poisoning logic")
Cc: stable@...r.kernel.org
Signed-off-by: Andrey Ryabinin <ryabinin.a.a@...il.com>
---
include/linux/kasan.h | 6 ++++++
mm/kasan/shadow.c | 24 ++++++++++++++++++++++++
mm/vmalloc.c | 7 ++-----
3 files changed, 32 insertions(+), 5 deletions(-)
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 9c6ac4b62eb9..ff27712dd3c8 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -641,6 +641,9 @@ kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
__kasan_unpoison_vmap_areas(vms, nr_vms, flags);
}
+void kasan_vrealloc(const void *start, unsigned long old_size,
+		    unsigned long new_size);
+
#else /* CONFIG_KASAN_VMALLOC */
static inline void kasan_populate_early_vm_area_shadow(void *start,
@@ -670,6 +673,9 @@ kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
kasan_vmalloc_flags_t flags)
{ }
+static inline void kasan_vrealloc(const void *start, unsigned long old_size,
+				  unsigned long new_size) { }
+
#endif /* CONFIG_KASAN_VMALLOC */
#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index 32fbdf759ea2..e9b6b2d8e651 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -651,6 +651,30 @@ void __kasan_poison_vmalloc(const void *start, unsigned long size)
kasan_poison(start, size, KASAN_VMALLOC_INVALID, false);
}
+void kasan_vrealloc(const void *addr, unsigned long old_size,
+		    unsigned long new_size)
+{
+	if (!kasan_enabled())
+		return;
+
+	if (new_size < old_size) {
+		kasan_poison_last_granule(addr, new_size);
+
+		new_size = round_up(new_size, KASAN_GRANULE_SIZE);
+		old_size = round_up(old_size, KASAN_GRANULE_SIZE);
+		if (new_size < old_size)
+			__kasan_poison_vmalloc(addr + new_size,
+					       old_size - new_size);
+	} else if (new_size > old_size) {
+		old_size = round_down(old_size, KASAN_GRANULE_SIZE);
+		__kasan_unpoison_vmalloc(addr + old_size,
+					 new_size - old_size,
+					 KASAN_VMALLOC_PROT_NORMAL |
+					 KASAN_VMALLOC_VM_ALLOC |
+					 KASAN_VMALLOC_KEEP_TAG);
+	}
+}
+
#else /* CONFIG_KASAN_VMALLOC */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 41dd01e8430c..2536d34df058 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -4322,7 +4322,7 @@ void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align
if (want_init_on_free() || want_init_on_alloc(flags))
memset((void *)p + size, 0, old_size - size);
vm->requested_size = size;
- kasan_poison_vmalloc(p + size, old_size - size);
+ kasan_vrealloc(p, old_size, size);
return (void *)p;
}
@@ -4330,16 +4330,13 @@ void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align
* We already have the bytes available in the allocation; use them.
*/
if (size <= alloced_size) {
-	kasan_unpoison_vmalloc(p + old_size, size - old_size,
-			       KASAN_VMALLOC_PROT_NORMAL |
-			       KASAN_VMALLOC_VM_ALLOC |
-			       KASAN_VMALLOC_KEEP_TAG);
/*
* No need to zero memory here, as unused memory will have
* already been zeroed at initial allocation time or during
* realloc shrink time.
*/
vm->requested_size = size;
+ kasan_vrealloc(p, old_size, size);
return (void *)p;
}
--
2.52.0