Message-Id: <8ae180159c3789ca75bc69857958c31d25ea96ee.1658189199.git.andreyknvl@google.com>
Date: Tue, 19 Jul 2022 02:09:55 +0200
From: andrey.konovalov@...ux.dev
To: Marco Elver <elver@...gle.com>,
Alexander Potapenko <glider@...gle.com>
Cc: Andrey Konovalov <andreyknvl@...il.com>,
Dmitry Vyukov <dvyukov@...gle.com>,
Andrey Ryabinin <ryabinin.a.a@...il.com>,
kasan-dev@...glegroups.com, Peter Collingbourne <pcc@...gle.com>,
Evgenii Stepanov <eugenis@...gle.com>,
Florian Mayer <fmayer@...gle.com>,
Andrew Morton <akpm@...ux-foundation.org>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
Andrey Konovalov <andreyknvl@...gle.com>
Subject: [PATCH mm v2 15/33] kasan: only define kasan_never_merge for Generic mode

From: Andrey Konovalov <andreyknvl@...gle.com>

KASAN prevents merging of slab caches whose objects have per-object
metadata stored in redzones.

As only the Generic mode now uses per-object metadata, define
kasan_never_merge() only for this mode.
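
For reference, the slab allocator consults this flag through its
never-merge mask: a cache whose flags intersect SLAB_NEVER_MERGE is
never merged with another. A simplified sketch of that path, abbreviated
from mm/slab_common.c around this series (the secondary name/size/ctor
checks are omitted here):

	/* Flags that unconditionally prevent slab cache merging. */
	#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | \
			SLAB_STORE_USER | SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | \
			SLAB_NOLEAKTRACE | SLAB_FAILSLAB | kasan_never_merge())

	int slab_unmergeable(struct kmem_cache *s)
	{
		/* A cache carrying a never-merge flag always stays separate. */
		if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
			return 1;

		/* Name/size/ctor checks omitted in this sketch. */
		return 0;
	}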
Signed-off-by: Andrey Konovalov <andreyknvl@...gle.com>
---
 include/linux/kasan.h | 18 ++++++------------
 mm/kasan/common.c     |  8 --------
 mm/kasan/generic.c    |  8 ++++++++
 3 files changed, 14 insertions(+), 20 deletions(-)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 027df7599573..9743d4b3a918 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -103,14 +103,6 @@ struct kasan_cache {
bool is_kmalloc;
};

-slab_flags_t __kasan_never_merge(void);
-static __always_inline slab_flags_t kasan_never_merge(void)
-{
- if (kasan_enabled())
- return __kasan_never_merge();
- return 0;
-}
-
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
@@ -261,10 +253,6 @@ static __always_inline bool kasan_check_byte(const void *addr)

#else /* CONFIG_KASAN */

-static inline slab_flags_t kasan_never_merge(void)
-{
- return 0;
-}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
bool init) {}
@@ -325,6 +313,7 @@ static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#ifdef CONFIG_KASAN_GENERIC

size_t kasan_metadata_size(struct kmem_cache *cache);
+slab_flags_t kasan_never_merge(void);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
@@ -338,6 +327,11 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
return 0;
}
+/* And thus nothing prevents cache merging. */
+static inline slab_flags_t kasan_never_merge(void)
+{
+ return 0;
+}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 0cef41f8a60d..e4ff0e4e7a9d 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -88,14 +88,6 @@ asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
}
#endif /* CONFIG_KASAN_STACK */

-/* Only allow cache merging when no per-object metadata is present. */
-slab_flags_t __kasan_never_merge(void)
-{
- if (kasan_requires_meta())
- return SLAB_KASAN;
- return 0;
-}
-
void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
u8 tag;
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 806ab92032c3..25333bf3c99f 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -328,6 +328,14 @@ DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);

+/* Only allow cache merging when no per-object metadata is present. */
+slab_flags_t kasan_never_merge(void)
+{
+ if (!kasan_requires_meta())
+ return 0;
+ return SLAB_KASAN;
+}
+
/*
* Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
* For larger allocations larger redzones are used.
--
2.25.1