Message-Id: <20220907071023.3838692-5-feng.tang@intel.com>
Date: Wed, 7 Sep 2022 15:10:23 +0800
From: Feng Tang <feng.tang@...el.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
Vlastimil Babka <vbabka@...e.cz>,
Christoph Lameter <cl@...ux.com>,
Pekka Enberg <penberg@...nel.org>,
David Rientjes <rientjes@...gle.com>,
Joonsoo Kim <iamjoonsoo.kim@....com>,
Roman Gushchin <roman.gushchin@...ux.dev>,
Hyeonggon Yoo <42.hyeyoo@...il.com>,
Dmitry Vyukov <dvyukov@...gle.com>,
Jonathan Corbet <corbet@....net>
Cc: Dave Hansen <dave.hansen@...el.com>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, kasan-dev@...glegroups.com,
Feng Tang <feng.tang@...el.com>
Subject: [PATCH v5 4/4] mm/slub: extend redzone check to extra allocated kmalloc space than requested

kmalloc rounds the requested size up to one of a set of fixed buffer
sizes (mostly powers of 2), so an allocated object may contain extra
space beyond what was requested, and the size of that space is the
actual buffer size minus the original request size. To better detect
out-of-bounds accesses to, or abuse of, this space, add a redzone
sanity check for it.
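
As an illustration (a sketch, not part of the patch; sizes assume the
default kmalloc caches), a kmalloc(52) request is served from the
kmalloc-64 cache, so the last 12 bytes of the object lie beyond the
request and can now be redzoned:

	/*
	 * Hypothetical layout for kmalloc(52) served from kmalloc-64
	 * with redzoning enabled:
	 *
	 *   [ 52 requested bytes | 12 bytes of kmalloc redzone ]
	 */
	char *buf = kmalloc(52, GFP_KERNEL);

	/*
	 * OOB write into the redzoned tail: reported as a "kmalloc
	 * Redzone" overwrite the next time the object is checked,
	 * e.g. on kfree().
	 */
	buf[60] = 0xaa;
	kfree(buf);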

In the current kernel, some kmalloc users already know about this
extra space and use it after calling ksize() to query the actual size
of the allocated buffer. So skip the sanity check for objects that
ksize() has been called on, treating them as legitimate users.
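
For illustration, a typical legitimate user follows the pattern
sketched below (a minimal sketch; the surrounding code is
hypothetical):

	void *buf = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/*
	 * Query how much space was really allocated and use all of
	 * it; calling ksize() marks the object so that the kmalloc
	 * redzone check is skipped for it.
	 */
	len = ksize(buf);
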
Suggested-by: Vlastimil Babka <vbabka@...e.cz>
Signed-off-by: Feng Tang <feng.tang@...el.com>
---
mm/slab.h | 4 ++++
mm/slab_common.c | 4 ++++
mm/slub.c | 57 +++++++++++++++++++++++++++++++++++++++++++++---
3 files changed, 62 insertions(+), 3 deletions(-)
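
Note: to exercise the new check, both redzoning and the original-size
tracking it depends on need to be enabled for the kmalloc caches,
e.g. by booting with:

	slub_debug=ZU

(a sketch: 'Z' enables redzoning, and 'U' enables the user tracking
alongside which the original request size is stored). An out-of-bounds
write into the rounded-up tail of a kmalloc object is then reported
as a "kmalloc Redzone" overwrite.
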
diff --git a/mm/slab.h b/mm/slab.h
index 20f9e2a9814f..0bc91b30b031 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -885,4 +885,8 @@ void __check_heap_object(const void *ptr, unsigned long n,
}
#endif
+#ifdef CONFIG_SLUB_DEBUG
+void skip_orig_size_check(struct kmem_cache *s, const void *object);
+#endif
+
#endif /* MM_SLAB_H */
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 8e13e3aac53f..5106667d6adb 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1001,6 +1001,10 @@ size_t __ksize(const void *object)
return folio_size(folio);
}
+#ifdef CONFIG_SLUB_DEBUG
+ skip_orig_size_check(folio_slab(folio)->slab_cache, object);
+#endif
+
return slab_ksize(folio_slab(folio)->slab_cache);
}
diff --git a/mm/slub.c b/mm/slub.c
index f523601d3fcf..2f0302136604 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -812,12 +812,27 @@ static inline void set_orig_size(struct kmem_cache *s,
if (!slub_debug_orig_size(s))
return;
+#ifdef CONFIG_KASAN_GENERIC
+	/*
+	 * KASAN can store its free metadata at the start of the object
+	 * area, so skip the redzone check if the KASAN metadata is big
+	 * enough to possibly overlap the kmalloc redzone.
+	 */
+ if (s->kasan_info.free_meta_size_in_object * 2 >= s->object_size)
+ orig_size = s->object_size;
+#endif
+
p += get_info_end(s);
p += sizeof(struct track) * 2;
*(unsigned int *)p = orig_size;
}
+void skip_orig_size_check(struct kmem_cache *s, const void *object)
+{
+ set_orig_size(s, (void *)object, s->object_size);
+}
+
static unsigned int get_orig_size(struct kmem_cache *s, void *object)
{
void *p = kasan_reset_tag(object);
@@ -949,13 +964,34 @@ static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
static void init_object(struct kmem_cache *s, void *object, u8 val)
{
u8 *p = kasan_reset_tag(object);
+ unsigned int orig_size = s->object_size;
- if (s->flags & SLAB_RED_ZONE)
+ if (s->flags & SLAB_RED_ZONE) {
memset(p - s->red_left_pad, val, s->red_left_pad);
+ if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
+ unsigned int zone_start;
+
+ orig_size = get_orig_size(s, object);
+ zone_start = orig_size;
+
+ if (!freeptr_outside_object(s))
+ zone_start = max_t(unsigned int, orig_size,
+ s->offset + sizeof(void *));
+
+			/*
+			 * Redzone the extra space that kmalloc
+			 * allocated beyond the requested size.
+			 */
+ if (zone_start < s->object_size)
+ memset(p + zone_start, val,
+ s->object_size - zone_start);
+ }
+ }
+
if (s->flags & __OBJECT_POISON) {
- memset(p, POISON_FREE, s->object_size - 1);
- p[s->object_size - 1] = POISON_END;
+ memset(p, POISON_FREE, orig_size - 1);
+ p[orig_size - 1] = POISON_END;
}
if (s->flags & SLAB_RED_ZONE)
@@ -1103,6 +1139,7 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
{
u8 *p = object;
u8 *endobject = object + s->object_size;
+ unsigned int orig_size;
if (s->flags & SLAB_RED_ZONE) {
if (!check_bytes_and_report(s, slab, object, "Left Redzone",
@@ -1112,6 +1149,20 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
if (!check_bytes_and_report(s, slab, object, "Right Redzone",
endobject, val, s->inuse - s->object_size))
return 0;
+
+ if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
+ orig_size = get_orig_size(s, object);
+
+ if (!freeptr_outside_object(s))
+ orig_size = max_t(unsigned int, orig_size,
+ s->offset + sizeof(void *));
+ if (s->object_size > orig_size &&
+ !check_bytes_and_report(s, slab, object,
+ "kmalloc Redzone", p + orig_size,
+ val, s->object_size - orig_size)) {
+ return 0;
+ }
+ }
} else {
if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
check_bytes_and_report(s, slab, p, "Alignment padding",
--
2.34.1