Message-ID: <Y3sc1G6WEKte4Awd@feng-clx>
Date: Mon, 21 Nov 2022 14:38:12 +0800
From: Feng Tang <feng.tang@...el.com>
To: Vlastimil Babka <vbabka@...e.cz>
CC: Andrew Morton <akpm@...ux-foundation.org>,
Christoph Lameter <cl@...ux.com>,
Pekka Enberg <penberg@...nel.org>,
David Rientjes <rientjes@...gle.com>,
Joonsoo Kim <iamjoonsoo.kim@....com>,
Roman Gushchin <roman.gushchin@...ux.dev>,
Hyeonggon Yoo <42.hyeyoo@...il.com>,
"Dmitry Vyukov" <dvyukov@...gle.com>,
Andrey Konovalov <andreyknvl@...il.com>,
"Kees Cook" <keescook@...omium.org>,
"Hansen, Dave" <dave.hansen@...el.com>,
"linux-mm@...ck.org" <linux-mm@...ck.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"kasan-dev@...glegroups.com" <kasan-dev@...glegroups.com>
Subject: Re: [PATCH v7 0/3] mm/slub: extend redzone check for kmalloc objects
On Fri, Nov 11, 2022 at 04:29:43PM +0800, Tang, Feng wrote:
> On Fri, Nov 11, 2022 at 04:16:32PM +0800, Vlastimil Babka wrote:
> > > for (shift = 3; shift <= 12; shift++) {
> > > 	size = 1 << shift;
> > > 	buf = kmalloc(size + 4, GFP_KERNEL);
> > > 	/* We have 96, 192 kmalloc sizes, which are not power of 2 */
> > > 	if (size == 64 || size == 128)
> > > 		oob_size = 16;
> > > 	else
> > > 		oob_size = size - 4;
> > > 	memset(buf + size + 4, 0xee, oob_size);
> > > 	kfree(buf);
> > > }
> >
> > Sounds like a new slub_kunit test would be useful? :) doesn't need to be
> > that exhaustive wrt all sizes, we could just pick one and check that a write
> > beyond requested kmalloc size is detected?
>
> Just git-grepped out slub_kunit.c :), will try to add a case to it.
> I'll also check if the case will also be caught by other sanitizer
> tools like kasan/kfence etc.

Just checked: KASAN already has an API to disable its checks temporarily,
and I did see KFENCE chime in sometimes (4 out of 178 runs), so we need
to skip kfenced addresses.
Here is the draft patch, thanks!
>From 45bf8d0072e532f43063dbda44c6bb3adcc388b6 Mon Sep 17 00:00:00 2001
From: Feng Tang <feng.tang@...el.com>
Date: Mon, 21 Nov 2022 13:17:11 +0800
Subject: [PATCH] mm/slub, kunit: Add a case for kmalloc redzone functionality

The kmalloc redzone check for slub has been merged, so it's better to add
a kunit test case for it. It is inspired by a real-world case described
in commit 120ee599b5bf ("staging: octeon-usb: prevent memory corruption"):
"
octeon-hcd will crash the kernel when SLOB is used. This usually happens
after the 18-byte control transfer when a device descriptor is read.
The DMA engine is always transferring full 32-bit words and if the
transfer is shorter, some random garbage appears after the buffer.
The problem is not visible with SLUB since it rounds up the allocations
to word boundary, and the extra bytes will go undetected.
"
Suggested-by: Vlastimil Babka <vbabka@...e.cz>
Signed-off-by: Feng Tang <feng.tang@...el.com>
---
lib/slub_kunit.c | 42 ++++++++++++++++++++++++++++++++++++++++++
mm/slab.h | 15 +++++++++++++++
mm/slub.c | 4 ++--
3 files changed, 59 insertions(+), 2 deletions(-)
diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
index 7a0564d7cb7a..0653eed19bff 100644
--- a/lib/slub_kunit.c
+++ b/lib/slub_kunit.c
@@ -120,6 +120,47 @@ static void test_clobber_redzone_free(struct kunit *test)
kmem_cache_destroy(s);
}
+
+/*
+ * This test simulates a real-world case: a device driver requests an
+ * 18-byte buffer, but the device HW has to operate at 32-bit granularity,
+ * so it may actually read or write 20 bytes to the buffer, possibly
+ * polluting 2 extra bytes beyond the requested space.
+ */
+static void test_kmalloc_redzone_access(struct kunit *test)
+{
+	u8 *p;
+
+	if (!is_slub_debug_flags_enabled(SLAB_STORE_USER | SLAB_RED_ZONE))
+		kunit_skip(test, "Test requires SLAB_STORE_USER & SLAB_RED_ZONE flags to be enabled");
+
+	p = kmalloc(18, GFP_KERNEL);
+
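+	/*
+	 * KFENCE-managed objects bypass the slab redzone layout, so retry
+	 * the allocation a few times to get an object backed by a normal
+	 * slab, and skip the test if that fails.
+	 */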
+#ifdef CONFIG_KFENCE
+	{
+		int max_retry = 10;
+
+		while (is_kfence_address(p) && max_retry--) {
+			kfree(p);
+			p = kmalloc(18, GFP_KERNEL);
+		}
+
+		if (is_kfence_address(p)) {
+			kfree(p);
+			kunit_skip(test, "Failed to get non-kfenced memory");
+		}
+	}
+#endif
+
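+	/*
+	 * Suppress KASAN so the out-of-bounds write below reaches SLUB's
+	 * redzone instead of being trapped by KASAN first.
+	 */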
+	kasan_disable_current();
+
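+	/* Write 2 bytes past the requested 18 bytes, into the kmalloc redzone */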
+	p[18] = 0xab;
+	p[19] = 0xab;
+	kfree(p);
+
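+	/* slab_errors is bumped when SLUB's redzone check reports the corruption */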
+	KUNIT_EXPECT_EQ(test, 3, slab_errors);
+	kasan_enable_current();
+}
+
static int test_init(struct kunit *test)
{
slab_errors = 0;
@@ -139,6 +180,7 @@ static struct kunit_case test_cases[] = {
#endif
KUNIT_CASE(test_clobber_redzone_free),
+ KUNIT_CASE(test_kmalloc_redzone_access),
{}
};
diff --git a/mm/slab.h b/mm/slab.h
index e3b3231af742..72f7a85e01ab 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -413,6 +413,17 @@ static inline bool __slub_debug_enabled(void)
{
return static_branch_unlikely(&slub_debug_enabled);
}
+
+extern slab_flags_t slub_debug;
+
+/*
+ * This should only be used after boot, once 'slub_debug' has been
+ * initialized.
+ */
+static inline bool is_slub_debug_flags_enabled(slab_flags_t flags)
+{
+ return (slub_debug & flags) == flags;
+}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
@@ -421,6 +432,10 @@ static inline bool __slub_debug_enabled(void)
{
return false;
}
+static inline bool is_slub_debug_flags_enabled(slab_flags_t flags)
+{
+ return false;
+}
#endif
/*
diff --git a/mm/slub.c b/mm/slub.c
index a24b71041b26..6ef72b8f6291 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -638,9 +638,9 @@ static inline void *restore_red_left(struct kmem_cache *s, void *p)
* Debug settings:
*/
#if defined(CONFIG_SLUB_DEBUG_ON)
-static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
+slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
#else
-static slab_flags_t slub_debug;
+slab_flags_t slub_debug;
#endif
static char *slub_debug_string;
--
2.34.1