Message-Id: <1cf400f36ab1fd3c83e7626c3797cb11ebf9ef7f.1612538932.git.andreyknvl@google.com>
Date: Fri, 5 Feb 2021 16:39:09 +0100
From: Andrey Konovalov <andreyknvl@...gle.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
Catalin Marinas <catalin.marinas@....com>,
Vincenzo Frascino <vincenzo.frascino@....com>,
Dmitry Vyukov <dvyukov@...gle.com>,
Alexander Potapenko <glider@...gle.com>,
Marco Elver <elver@...gle.com>
Cc: Will Deacon <will.deacon@....com>,
Andrey Ryabinin <aryabinin@...tuozzo.com>,
Peter Collingbourne <pcc@...gle.com>,
Evgenii Stepanov <eugenis@...gle.com>,
Branislav Rankov <Branislav.Rankov@....com>,
Kevin Brodsky <kevin.brodsky@....com>,
kasan-dev@...glegroups.com, linux-arm-kernel@...ts.infradead.org,
linux-mm@...ck.org, linux-kernel@...r.kernel.org,
Andrey Konovalov <andreyknvl@...gle.com>
Subject: [PATCH v2 08/12] kasan, mm: optimize krealloc poisoning

Currently, krealloc() always calls ksize(), which unpoisons the whole
object including the redzone. This is inefficient, as kasan_krealloc()
repoisons the redzone for objects that fit into the same buffer.

This patch changes krealloc() instrumentation to use the uninstrumented
__ksize(), which doesn't unpoison the memory. Instead, kasan_krealloc()
is changed to unpoison the memory excluding the redzone.

For objects that don't fit into the old allocation, this patch disables
KASAN accessibility checks when copying memory into a new object instead
of unpoisoning it.

Signed-off-by: Andrey Konovalov <andreyknvl@...gle.com>
---
mm/kasan/common.c | 12 ++++++++++--
mm/slab_common.c | 20 ++++++++++++++------
2 files changed, 24 insertions(+), 8 deletions(-)
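
A note for reviewers (illustrative only, not part of the patch): the
userspace model below sketches what the optimization changes for objects
that still fit into the old allocation. The object size, the per-byte
"shadow" array, and all helper names are made up for the sketch; only the
order of (un)poisoning operations mirrors the kernel code.

#include <stdio.h>
#include <string.h>

#define OBJ_SIZE 32			/* rounded-up slab object size */
static char shadow[OBJ_SIZE];		/* toy shadow: 1 = poisoned byte */

static void set_shadow(int from, int to, char val)
{
	memset(shadow + from, val, to - from);
}

/* Old flow: ksize() unpoisons everything, kasan_krealloc() repoisons the redzone. */
static void krealloc_shadow_old(int new_size)
{
	set_shadow(0, OBJ_SIZE, 0);		/* ksize() side effect */
	set_shadow(new_size, OBJ_SIZE, 1);	/* redzone repoisoned */
}

/* New flow: only the requested size is unpoisoned, the redzone is written once. */
static void krealloc_shadow_new(int new_size)
{
	set_shadow(0, new_size, 0);		/* kasan_unpoison(object, size) */
	set_shadow(new_size, OBJ_SIZE, 1);	/* kmalloc-style redzone poisoning */
}

int main(void)
{
	krealloc_shadow_new(20);
	printf("byte 10: %d, byte 25: %d\n", shadow[10], shadow[25]);	/* 0, 1 */
	krealloc_shadow_old(20);	/* same end state, more shadow writes */
	return 0;
}

The end state of the shadow is identical in both flows; the new flow just
avoids unpoisoning the redzone only to poison it again, and for objects
that don't fit it avoids unpoisoning the old object at all by disabling
KASAN checks around the memcpy() instead.
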
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 7ea643f7e69c..a8a67dca5e55 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -476,7 +476,7 @@ static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
/*
* The object has already been unpoisoned by kasan_slab_alloc() for
- * kmalloc() or by ksize() for krealloc().
+ * kmalloc() or by kasan_krealloc() for krealloc().
*/
/*
@@ -526,7 +526,7 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
/*
* The object has already been unpoisoned by kasan_alloc_pages() for
- * alloc_pages() or by ksize() for krealloc().
+ * alloc_pages() or by kasan_krealloc() for krealloc().
*/
/*
@@ -554,8 +554,16 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
if (unlikely(object == ZERO_SIZE_PTR))
return (void *)object;
+ /*
+ * Unpoison the object's data.
+ * Part of it might already have been unpoisoned, but it's unknown
+ * how big that part is.
+ */
+ kasan_unpoison(object, size);
+
page = virt_to_head_page(object);
+ /* Piggy-back on kmalloc() instrumentation to poison the redzone. */
if (unlikely(!PageSlab(page)))
return __kasan_kmalloc_large(object, size, flags);
else
diff --git a/mm/slab_common.c b/mm/slab_common.c
index dad70239b54c..60a2f49df6ce 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1140,19 +1140,27 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size,
void *ret;
size_t ks;
- if (likely(!ZERO_OR_NULL_PTR(p)) && !kasan_check_byte(p))
- return NULL;
-
- ks = ksize(p);
+ /* Don't use instrumented ksize to allow precise KASAN poisoning. */
+ if (likely(!ZERO_OR_NULL_PTR(p))) {
+ if (!kasan_check_byte(p))
+ return NULL;
+ ks = kfence_ksize(p) ?: __ksize(p);
+ } else
+ ks = 0;
+ /* If the object still fits, repoison it precisely. */
if (ks >= new_size) {
p = kasan_krealloc((void *)p, new_size, flags);
return (void *)p;
}
ret = kmalloc_track_caller(new_size, flags);
- if (ret && p)
- memcpy(ret, p, ks);
+ if (ret && p) {
+ /* Disable KASAN checks as the object's redzone is accessed. */
+ kasan_disable_current();
+ memcpy(ret, kasan_reset_tag(p), ks);
+ kasan_enable_current();
+ }
return ret;
}
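
One more note, on the __do_krealloc() hunk above: "kfence_ksize(p) ?:
__ksize(p)" uses the GNU "?:" shorthand, which evaluates its left operand
once and falls back to the right operand only when the left one is zero.
Assuming kfence_ksize() returns 0 for objects not managed by KFENCE, it
is roughly equivalent to:

	size_t ks = kfence_ksize(p);	/* non-zero only for KFENCE objects */

	if (!ks)
		ks = __ksize(p);

so KFENCE-backed allocations keep their KFENCE-tracked size and everything
else goes through the uninstrumented __ksize().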
--
2.30.0.365.g02bc693789-goog