Message-Id: <1580383064-16536-1-git-send-email-vjitta@codeaurora.org>
Date: Thu, 30 Jan 2020 16:47:44 +0530
From: vjitta@...eaurora.org
To: cl@...ux.com, penberg@...nel.org, rientjes@...gle.com,
iamjoonsoo.kim@....com, akpm@...ux-foundation.org,
linux-mm@...ck.org
Cc: linux-kernel@...r.kernel.org, vinmenon@...eaurora.org,
kernel-team@...roid.com, Vijayanand Jitta <vjitta@...eaurora.org>
Subject: [PATCH] mm: slub: reinitialize random sequence cache on slab object update
From: Vijayanand Jitta <vjitta@...eaurora.org>
The random sequence cache is precomputed when a slab cache is created,
based on the object size and the number of objects per slab. Both of
these can change when flags such as SLAB_STORE_USER or SLAB_POISON are
updated through sysfs, yet the precomputed cache is left untouched. When
shuffle_freelist() is then called during slab_alloc(), it uses the
updated object count to index the stale precomputed random sequence
cache. This mismatched access yields bogus freelist offsets and can
corrupt the slab. Fix this by reinitializing the random sequence cache
upon a slab object update.
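
For context on why the stale cache corrupts memory: the sequence
entries are not raw indices. init_cache_random_seq() pre-scales each
entry into a byte offset using the current s->size, and
next_freelist_entry() bounds its walk by the precomputed object count.
Once calculate_sizes() changes s->size and s->oo, both the scaling and
the bound are stale. Below is an abridged sketch of the two mainline
helpers involved (error paths and hardening details trimmed; shown for
illustration only, not part of this patch):

/* Abridged from mm/slub.c (v5.5-era); illustration only. */
static int init_cache_random_seq(struct kmem_cache *s)
{
	unsigned int count = oo_objects(s->oo);	/* object count at creation time */
	int err;

	if (s->random_seq)	/* already initialized: do nothing */
		return 0;

	err = cache_random_seq_create(s, count, GFP_KERNEL);
	if (err)
		return err;

	/* Entries are pre-scaled into byte offsets using the current s->size. */
	if (s->random_seq) {
		unsigned int i;

		for (i = 0; i < count; i++)
			s->random_seq[i] *= s->size;
	}
	return 0;
}

static void *next_freelist_entry(struct kmem_cache *s, struct page *page,
				unsigned long *pos, void *start,
				unsigned long page_limit,
				unsigned long freelist_count)
{
	unsigned int idx;

	/*
	 * After a sysfs flag update, s->random_seq still holds offsets
	 * scaled by the old s->size, while page_limit and freelist_count
	 * reflect the new layout: idx can land astride object boundaries.
	 */
	do {
		idx = s->random_seq[*pos];
		*pos += 1;
		if (*pos >= freelist_count)
			*pos = 0;
	} while (unlikely(idx >= page_limit));

	return start + idx;
}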
A sample panic trace observed when a write to slab_store_user was
attempted:

Call trace:
exception
set_freepointer(inline)
shuffle_freelist(inline)
new_slab+0x688/0x690
___slab_alloc+0x548/0x6f8
kmem_cache_alloc+0x3dc/0x418
zs_malloc+0x60/0x578
zram_bvec_rw+0x66c/0xaa0
zram_make_request+0x190/0x2c8
generic_make_request+0x1f8/0x420
submit_bio+0x140/0x1d8
submit_bh_wbc+0x1a0/0x1e0
__block_write_full_page+0x3a0/0x5e8
block_write_full_page+0xec/0x108
blkdev_writepage+0x2c/0x38
__writepage+0x34/0x98
write_cache_pages+0x33c/0x598
generic_writepages+0x54/0x98
blkdev_writepages+0x24/0x30
do_writepages+0x90/0x138
__filemap_fdatawrite_range+0xc0/0x128
file_write_and_wait_range+0x44/0xa0
blkdev_fsync+0x38/0x68
__arm64_sys_fsync+0x6c/0xb8
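
Mapping the trace back to the code: shuffle_freelist() hands each stale
offset to set_freepointer(), which stores the next-free link at
object + s->offset; with a mis-scaled offset that store lands off a
real object boundary, hence the exception above. A simplified sketch of
the faulting helper (hardened-freelist details elided; illustration
only):

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	/*
	 * "object" came from next_freelist_entry(); a stale offset makes
	 * this store scribble outside the intended object.
	 */
	*(void **)(object + s->offset) = fp;
}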
Signed-off-by: Vijayanand Jitta <vjitta@...eaurora.org>
---
 mm/slub.c | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/mm/slub.c b/mm/slub.c
index 0ab92ec..b88dd0f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1533,6 +1533,24 @@ static int init_cache_random_seq(struct kmem_cache *s)
 	return 0;
 }
 
+/* re-initialize the random sequence cache */
+static int reinit_cache_random_seq(struct kmem_cache *s)
+{
+	int err;
+
+	if (s->random_seq) {
+		cache_random_seq_destroy(s);
+		err = init_cache_random_seq(s);
+
+		if (err) {
+			pr_err("SLUB: Unable to re-initialize random sequence cache for %s\n",
+				s->name);
+			return err;
+		}
+	}
+
+	return 0;
+}
 /* Initialize each random sequence freelist per cache */
 static void __init init_freelist_randomization(void)
 {
@@ -1607,6 +1625,10 @@ static inline int init_cache_random_seq(struct kmem_cache *s)
 {
 	return 0;
 }
+static inline int reinit_cache_random_seq(struct kmem_cache *s)
+{
+	return 0;
+}
 static inline void init_freelist_randomization(void) { }
 static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page)
 {
@@ -5192,6 +5214,7 @@ static ssize_t red_zone_store(struct kmem_cache *s,
 		s->flags |= SLAB_RED_ZONE;
 	}
 	calculate_sizes(s, -1);
+	reinit_cache_random_seq(s);
 	return length;
 }
 SLAB_ATTR(red_zone);
@@ -5212,6 +5235,7 @@ static ssize_t poison_store(struct kmem_cache *s,
 		s->flags |= SLAB_POISON;
 	}
 	calculate_sizes(s, -1);
+	reinit_cache_random_seq(s);
 	return length;
 }
 SLAB_ATTR(poison);
@@ -5233,6 +5257,7 @@ static ssize_t store_user_store(struct kmem_cache *s,
 		s->flags |= SLAB_STORE_USER;
 	}
 	calculate_sizes(s, -1);
+	reinit_cache_random_seq(s);
 	return length;
 }
 SLAB_ATTR(store_user);
--
1.9.1

QUALCOMM INDIA, on behalf of Qualcomm Innovation Center, Inc. is a
member of the Code Aurora Forum, hosted by The Linux Foundation