Message-Id: <20220124183943.409514111@linuxfoundation.org>
Date: Mon, 24 Jan 2022 19:40:47 +0100
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
	stable@...r.kernel.org, Eric Biggers <ebiggers@...gle.com>,
	"Paul E. McKenney" <paulmck@...nel.org>,
	"Jason A. Donenfeld" <Jason@...c4.com>
Subject: [PATCH 4.19 009/239] random: fix data race on crng_node_pool

From: Eric Biggers <ebiggers@...gle.com>

commit 5d73d1e320c3fd94ea15ba5f79301da9a8bcc7de upstream.

extract_crng() and crng_backtrack_protect() load crng_node_pool with a
plain load, which causes undefined behavior if do_numa_crng_init()
modifies it concurrently.
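
Illustration only, not part of this patch: reduced to a standalone
userspace sketch, the pre-patch reader looked roughly like the code
below (the helper name old_select_crng() and the nid parameter are
made up for the sketch).  The two plain reads of crng_node_pool race
with the concurrent store in do_numa_crng_init(), and the compiler is
also free to reload the pointer between the NULL check and the
dereference.

struct crng_state { int dummy; };

static struct crng_state primary_crng;
static struct crng_state **crng_node_pool;	/* written concurrently */

static struct crng_state *old_select_crng(int nid)
{
	struct crng_state *crng = NULL;

	if (crng_node_pool)			/* plain load #1 */
		crng = crng_node_pool[nid];	/* plain load #2, may differ */
	if (crng == NULL)
		crng = &primary_crng;
	return crng;
}
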
Fix this by using READ_ONCE(). Note: as per the previous discussion
https://lore.kernel.org/lkml/20211219025139.31085-1-ebiggers@kernel.org/T/#u,
READ_ONCE() is believed to be sufficient here, and it was requested that
it be used here instead of smp_load_acquire().
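
Again for illustration only, here is the fixed reader side as a
standalone userspace sketch, with C11 atomics standing in for the
kernel primitive.  C11 has no direct equivalent of READ_ONCE() plus the
address dependency the kernel relies on, so the sketch uses an acquire
load, which is slightly stronger; select_crng_sketch() and the nid
parameter are made up for the example.

#include <stdatomic.h>

struct crng_state { int dummy; };

static struct crng_state primary_crng;
static _Atomic(struct crng_state **) crng_node_pool;

static struct crng_state *select_crng_sketch(int nid)
{
	struct crng_state **pool;

	/* one load into a local; pairs with the release publish below */
	pool = atomic_load_explicit(&crng_node_pool, memory_order_acquire);
	if (pool && pool[nid])
		return pool[nid];

	return &primary_crng;
}
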
Also change do_numa_crng_init() to set crng_node_pool using
cmpxchg_release() instead of mb() + cmpxchg(), as the former is
sufficient here but is more lightweight.
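
And the matching writer side, sketched the same way:
atomic_compare_exchange_strong_explicit() with memory_order_release
stands in for cmpxchg_release(), so the per-node states become visible
to readers only after they are fully initialized.  NUM_NODES,
numa_pool_init_sketch(), and the omitted allocation-failure handling
are simplifications for the example.

#include <stdatomic.h>
#include <stdlib.h>

#define NUM_NODES 4

struct crng_state { int dummy; };

static _Atomic(struct crng_state **) crng_node_pool;

static void numa_pool_init_sketch(void)
{
	struct crng_state **pool, **expected = NULL;
	int i;

	pool = calloc(NUM_NODES, sizeof(*pool));
	for (i = 0; i < NUM_NODES; i++)
		pool[i] = calloc(1, sizeof(struct crng_state));

	/* publish with release semantics; pairs with the reader's load */
	if (!atomic_compare_exchange_strong_explicit(&crng_node_pool,
						     &expected, pool,
						     memory_order_release,
						     memory_order_relaxed)) {
		/* lost the race: another CPU already published a pool */
		for (i = 0; i < NUM_NODES; i++)
			free(pool[i]);
		free(pool);
	}
}
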
Fixes: 1e7f583af67b ("random: make /dev/urandom scalable for silly userspace programs")
Cc: stable@...r.kernel.org
Signed-off-by: Eric Biggers <ebiggers@...gle.com>
Acked-by: Paul E. McKenney <paulmck@...nel.org>
Signed-off-by: Jason A. Donenfeld <Jason@...c4.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
 drivers/char/random.c | 42 ++++++++++++++++++++++--------------------
 1 file changed, 22 insertions(+), 20 deletions(-)

--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -831,8 +831,8 @@ static void do_numa_crng_init(struct wor
 		crng_initialize(crng);
 		pool[i] = crng;
 	}
-	mb();
-	if (cmpxchg(&crng_node_pool, NULL, pool)) {
+	/* pairs with READ_ONCE() in select_crng() */
+	if (cmpxchg_release(&crng_node_pool, NULL, pool) != NULL) {
 		for_each_node(i)
 			kfree(pool[i]);
 		kfree(pool);
@@ -845,8 +845,26 @@ static void numa_crng_init(void)
 {
 	schedule_work(&numa_crng_init_work);
 }
+
+static struct crng_state *select_crng(void)
+{
+	struct crng_state **pool;
+	int nid = numa_node_id();
+
+	/* pairs with cmpxchg_release() in do_numa_crng_init() */
+	pool = READ_ONCE(crng_node_pool);
+	if (pool && pool[nid])
+		return pool[nid];
+
+	return &primary_crng;
+}
 #else
 static void numa_crng_init(void) {}
+
+static struct crng_state *select_crng(void)
+{
+	return &primary_crng;
+}
 #endif
 
 /*
@@ -995,15 +1013,7 @@ static void _extract_crng(struct crng_st
 
 static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE])
 {
-	struct crng_state *crng = NULL;
-
-#ifdef CONFIG_NUMA
-	if (crng_node_pool)
-		crng = crng_node_pool[numa_node_id()];
-	if (crng == NULL)
-#endif
-		crng = &primary_crng;
-	_extract_crng(crng, out);
+	_extract_crng(select_crng(), out);
 }
 
 /*
@@ -1032,15 +1042,7 @@ static void _crng_backtrack_protect(stru
 
 static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used)
 {
-	struct crng_state *crng = NULL;
-
-#ifdef CONFIG_NUMA
-	if (crng_node_pool)
-		crng = crng_node_pool[numa_node_id()];
-	if (crng == NULL)
-#endif
-		crng = &primary_crng;
-	_crng_backtrack_protect(crng, tmp, used);
+	_crng_backtrack_protect(select_crng(), tmp, used);
 }
 
 static ssize_t extract_crng_user(void __user *buf, size_t nbytes)