Message-Id: <93317097b668519d76097fb065201b2027436e22.1703024586.git.andreyknvl@google.com>
Date: Tue, 19 Dec 2023 23:28:55 +0100
From: andrey.konovalov@...ux.dev
To: Marco Elver <elver@...gle.com>,
Alexander Potapenko <glider@...gle.com>
Cc: Andrey Konovalov <andreyknvl@...il.com>,
Dmitry Vyukov <dvyukov@...gle.com>,
Andrey Ryabinin <ryabinin.a.a@...il.com>,
kasan-dev@...glegroups.com,
Evgenii Stepanov <eugenis@...gle.com>,
Breno Leitao <leitao@...ian.org>,
Alexander Lobakin <alobakin@...me>,
Andrew Morton <akpm@...ux-foundation.org>,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
Andrey Konovalov <andreyknvl@...gle.com>
Subject: [PATCH mm 11/21] kasan: introduce poison_kmalloc_large_redzone

From: Andrey Konovalov <andreyknvl@...gle.com>

Split the redzone poisoning code out of __kasan_kmalloc_large into a
poison_kmalloc_large_redzone helper and use it in both
__kasan_kmalloc_large and __kasan_krealloc.

This is a preparatory change for the following patches in this series.

Signed-off-by: Andrey Konovalov <andreyknvl@...gle.com>
---
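For reviewers, a condensed sketch of the structure after this patch
(abbreviated from the diff below; the helper body, which poisons the
redzone between size and the end of the page, is elided here):

	static inline void poison_kmalloc_large_redzone(const void *ptr,
							size_t size, gfp_t flags);

	void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
	{
		if (gfpflags_allow_blocking(flags))
			kasan_quarantine_reduce();

		if (unlikely(ptr == NULL))
			return NULL;

		/* Already unpoisoned by kasan_unpoison_pages(). */
		poison_kmalloc_large_redzone(ptr, size, flags);

		/* Keep the tag that was set by alloc_pages(). */
		return (void *)ptr;
	}

__kasan_krealloc() now calls the same helper for objects that are not
backed by a slab (!slab) and poison_kmalloc_redzone() otherwise,
returning the object pointer in both cases.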
 mm/kasan/common.c | 41 +++++++++++++++++++++++------------------
 1 file changed, 23 insertions(+), 18 deletions(-)

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 1217b260abc3..962805bf5f62 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -363,23 +363,12 @@ void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object
 }
 EXPORT_SYMBOL(__kasan_kmalloc);
 
-void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
+static inline void poison_kmalloc_large_redzone(const void *ptr, size_t size,
 						gfp_t flags)
 {
 	unsigned long redzone_start;
 	unsigned long redzone_end;
 
-	if (gfpflags_allow_blocking(flags))
-		kasan_quarantine_reduce();
-
-	if (unlikely(ptr == NULL))
-		return NULL;
-
-	/*
-	 * The object has already been unpoisoned by kasan_unpoison_pages() for
-	 * alloc_pages() or by kasan_krealloc() for krealloc().
-	 */
-
 	/*
 	 * The redzone has byte-level precision for the generic mode.
 	 * Partially poison the last object granule to cover the unaligned
@@ -389,12 +378,25 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
 		kasan_poison_last_granule(ptr, size);
 
 	/* Poison the aligned part of the redzone. */
-	redzone_start = round_up((unsigned long)(ptr + size),
-				KASAN_GRANULE_SIZE);
+	redzone_start = round_up((unsigned long)(ptr + size), KASAN_GRANULE_SIZE);
 	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
 	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
 		     KASAN_PAGE_REDZONE, false);
+}
 
+void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
+						gfp_t flags)
+{
+	if (gfpflags_allow_blocking(flags))
+		kasan_quarantine_reduce();
+
+	if (unlikely(ptr == NULL))
+		return NULL;
+
+	/* The object has already been unpoisoned by kasan_unpoison_pages(). */
+	poison_kmalloc_large_redzone(ptr, size, flags);
+
+	/* Keep the tag that was set by alloc_pages(). */
 	return (void *)ptr;
 }
 
@@ -402,6 +404,9 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
 {
 	struct slab *slab;
 
+	if (gfpflags_allow_blocking(flags))
+		kasan_quarantine_reduce();
+
 	if (unlikely(object == ZERO_SIZE_PTR))
 		return (void *)object;
 
@@ -419,11 +424,11 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
 
 	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
 	if (unlikely(!slab))
-		return __kasan_kmalloc_large(object, size, flags);
-	else {
+		poison_kmalloc_large_redzone(object, size, flags);
+	else
 		poison_kmalloc_redzone(slab->slab_cache, object, size, flags);
-		return (void *)object;
-	}
+
+	return (void *)object;
 }
 
 bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
--
2.25.1