Message-Id: <ac4e6fd5fde6f8d87fba1745860d93087c53b2cd.1699297309.git.andreyknvl@google.com>
Date: Mon, 6 Nov 2023 21:10:19 +0100
From: andrey.konovalov@...ux.dev
To: Marco Elver <elver@...gle.com>,
Alexander Potapenko <glider@...gle.com>
Cc: Andrey Konovalov <andreyknvl@...il.com>,
Dmitry Vyukov <dvyukov@...gle.com>,
Andrey Ryabinin <ryabinin.a.a@...il.com>,
kasan-dev@...glegroups.com, Evgenii Stepanov <eugenis@...gle.com>,
Andrew Morton <akpm@...ux-foundation.org>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
Andrey Konovalov <andreyknvl@...gle.com>
Subject: [PATCH RFC 10/20] kasan: clean up and rename ____kasan_kmalloc

From: Andrey Konovalov <andreyknvl@...gle.com>

Introduce a new poison_kmalloc_redzone helper function that poisons
the redzone for a kmalloc object.
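
For reference, poison_kmalloc_redzone keeps the actual redzone-poisoning
logic, of which the hunks below only show the edges. Roughly, it is the
following part of mm/kasan/common.c (quoted from memory as a sketch, not
verbatim from the patched tree):

	/* Sketch of the body kept by poison_kmalloc_redzone(); from memory. */
	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule((void *)object, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(object + size),
				KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_GRANULE_SIZE);
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
			KASAN_SLAB_REDZONE, false);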

Drop the confusingly named ____kasan_kmalloc function and instead call
poison_kmalloc_redzone, together with the other required parts of
____kasan_kmalloc, directly from the callers.
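
For context, callers do not invoke __kasan_kmalloc directly; they go
through the static-key wrapper in include/linux/kasan.h, which this
patch leaves untouched. It looks roughly like this (sketch from memory):

	/* Sketch of the include/linux/kasan.h wrapper; unchanged here. */
	static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
					const void *object, size_t size, gfp_t flags)
	{
		if (kasan_enabled())
			return __kasan_kmalloc(s, object, size, flags);
		return (void *)object;
	}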

This is a preparatory change for the following patches in this series.

Signed-off-by: Andrey Konovalov <andreyknvl@...gle.com>
---
 mm/kasan/common.c | 42 ++++++++++++++++++++++--------------------
 1 file changed, 22 insertions(+), 20 deletions(-)

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 683d0dad32f2..ceb06d5f169f 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -302,26 +302,12 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
 	return tagged_object;
 }
 
-static inline void *____kasan_kmalloc(struct kmem_cache *cache,
+static inline void poison_kmalloc_redzone(struct kmem_cache *cache,
 				const void *object, size_t size, gfp_t flags)
 {
 	unsigned long redzone_start;
 	unsigned long redzone_end;
 
-	if (gfpflags_allow_blocking(flags))
-		kasan_quarantine_reduce();
-
-	if (unlikely(object == NULL))
-		return NULL;
-
-	if (is_kfence_address(kasan_reset_tag(object)))
-		return (void *)object;
-
-	/*
-	 * The object has already been unpoisoned by kasan_slab_alloc() for
-	 * kmalloc() or by kasan_krealloc() for krealloc().
-	 */
-
 	/*
 	 * The redzone has byte-level precision for the generic mode.
 	 * Partially poison the last object granule to cover the unaligned
@@ -345,14 +331,25 @@ static inline void *____kasan_kmalloc(struct kmem_cache *cache,
 	if (kasan_stack_collection_enabled() && is_kmalloc_cache(cache))
 		kasan_save_alloc_info(cache, (void *)object, flags);
 
-	/* Keep the tag that was set by kasan_slab_alloc(). */
-	return (void *)object;
 }
 
 void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
 					size_t size, gfp_t flags)
 {
-	return ____kasan_kmalloc(cache, object, size, flags);
+	if (gfpflags_allow_blocking(flags))
+		kasan_quarantine_reduce();
+
+	if (unlikely(object == NULL))
+		return NULL;
+
+	if (is_kfence_address(kasan_reset_tag(object)))
+		return (void *)object;
+
+	/* The object has already been unpoisoned by kasan_slab_alloc(). */
+	poison_kmalloc_redzone(cache, object, size, flags);
+
+	/* Keep the tag that was set by kasan_slab_alloc(). */
+	return (void *)object;
 }
 EXPORT_SYMBOL(__kasan_kmalloc);
 
@@ -398,6 +395,9 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
 	if (unlikely(object == ZERO_SIZE_PTR))
 		return (void *)object;
 
+	if (is_kfence_address(kasan_reset_tag(object)))
+		return (void *)object;
+
 	/*
 	 * Unpoison the object's data.
 	 * Part of it might already have been unpoisoned, but it's unknown
@@ -410,8 +410,10 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
 	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
 	if (unlikely(!slab))
 		return __kasan_kmalloc_large(object, size, flags);
-	else
-		return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
+	else {
+		poison_kmalloc_redzone(slab->slab_cache, object, size, flags);
+		return (void *)object;
+	}
 }
 
 bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
--
2.25.1