Message-Id: <4925f0fe15e9cda5f9d90d75c6e15acdc413ef9e.1670890407.git.david.keisarschm@mail.huji.ac.il>
Date: Tue, 13 Dec 2022 12:34:57 +0200
From: david.keisarschm@...l.huji.ac.il
To: linux-kernel@...r.kernel.org, Christoph Lameter <cl@...ux.com>,
Pekka Enberg <penberg@...nel.org>,
David Rientjes <rientjes@...gle.com>,
Joonsoo Kim <iamjoonsoo.kim@....com>,
Andrew Morton <akpm@...ux-foundation.org>,
Vlastimil Babka <vbabka@...e.cz>,
Roman Gushchin <roman.gushchin@...ux.dev>,
Hyeonggon Yoo <42.hyeyoo@...il.com>
Cc: David <david.keisarschm@...l.huji.ac.il>, aksecurity@...il.com,
ilay.bahat1@...il.com, linux-mm@...ck.org
Subject: [PATCH v2 1/3] Replace invocation of weak PRNG in mm/slab.c
From: David <david.keisarschm@...l.huji.ac.il>
We change the invocation of prandom_u32_state() to get_random_u32().
We also change freelist_init_state from a union to a struct, since
the rnd_state member is no longer needed - get_random_u32() maintains
its own state. This change is important because it makes the slab
allocator's freelist randomization stronger.
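For illustration only - this sketch is not part of the diff, and the
pick_slot_old()/pick_slot_new() helpers are hypothetical names - the
substance of the change in the shuffle path is roughly:

    #include <linux/prandom.h> /* struct rnd_state, prandom_u32_state() */
    #include <linux/random.h>  /* get_random_u32() */

    /* Old scheme: draws from a caller-seeded rnd_state, which is
     * predictable once the seed is known.
     */
    static unsigned int pick_slot_old(struct rnd_state *rnd, unsigned int i)
    {
    	return prandom_u32_state(rnd) % (i + 1);
    }

    /* New scheme: no per-caller state to seed; get_random_u32() is
     * backed by the kernel CRNG.
     */
    static unsigned int pick_slot_new(unsigned int i)
    {
    	return get_random_u32() % (i + 1);
    }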
Signed-off-by: David <david.keisarschm@...l.huji.ac.il>
---
mm/slab.c | 20 ++++++++------------
1 file changed, 8 insertions(+), 12 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 92d6b1d48..1476104f4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2360,20 +2360,17 @@ static void cache_init_objs_debug(struct kmem_cache *cachep, struct slab *slab)
#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Hold information during a freelist initialization */
-union freelist_init_state {
- struct {
- unsigned int pos;
- unsigned int *list;
- unsigned int count;
- };
- struct rnd_state rnd_state;
+struct freelist_init_state {
+ unsigned int pos;
+ unsigned int *list;
+ unsigned int count;
};
/*
* Initialize the state based on the randomization method available.
* return true if the pre-computed list is available, false otherwise.
*/
-static bool freelist_state_initialize(union freelist_init_state *state,
+static bool freelist_state_initialize(struct freelist_init_state *state,
struct kmem_cache *cachep,
unsigned int count)
{
@@ -2385,7 +2382,6 @@ static bool freelist_state_initialize(union freelist_init_state *state,
/* Use a random state if the pre-computed list is not available */
if (!cachep->random_seq) {
- prandom_seed_state(&state->rnd_state, rand);
ret = false;
} else {
state->list = cachep->random_seq;
@@ -2397,7 +2393,7 @@ static bool freelist_state_initialize(union freelist_init_state *state,
}
/* Get the next entry on the list and randomize it using a random shift */
-static freelist_idx_t next_random_slot(union freelist_init_state *state)
+static freelist_idx_t next_random_slot(struct freelist_init_state *state)
{
if (state->pos >= state->count)
state->pos = 0;
@@ -2418,7 +2414,7 @@ static void swap_free_obj(struct slab *slab, unsigned int a, unsigned int b)
static bool shuffle_freelist(struct kmem_cache *cachep, struct slab *slab)
{
unsigned int objfreelist = 0, i, rand, count = cachep->num;
- union freelist_init_state state;
+ struct freelist_init_state state;
bool precomputed;
if (count < 2)
@@ -2447,7 +2443,7 @@ static bool shuffle_freelist(struct kmem_cache *cachep, struct slab *slab)
/* Fisher-Yates shuffle */
for (i = count - 1; i > 0; i--) {
- rand = prandom_u32_state(&state.rnd_state);
+ rand = get_random_u32();
rand %= (i + 1);
swap_free_obj(slab, i, rand);
}
--
2.38.0