Message-Id: <20220527084822.751087885@linuxfoundation.org>
Date: Fri, 27 May 2022 10:48:55 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Theodore Tso <tytso@....edu>,
Eric Biggers <ebiggers@...gle.com>,
Dominik Brodowski <linux@...inikbrodowski.net>,
"Jason A. Donenfeld" <Jason@...c4.com>
Subject: [PATCH 5.17 023/111] random: tie batched entropy generation to base_crng generation

From: "Jason A. Donenfeld" <Jason@...c4.com>

commit 0791e8b655cc373718f0f58800fdc625a3447ac5 upstream.

Now that we have an explicit base_crng generation counter, we don't need
a separate one for batched entropy. Rather, we can just move the
generation forward every time we change crng_init state or update the
base_crng key.
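Roughly, the resulting scheme looks like the minimal userspace sketch
below. It is an illustration, not the kernel code: fill_random(),
get_batched_u64() and BATCH_WORDS are hypothetical stand-ins for
_get_random_bytes(), get_random_u64() and the batch array size, and the
real driver's per-CPU batches, local locking and READ_ONCE() are
omitted.

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

#define BATCH_WORDS 16

/* Bumped whenever the base key changes or crng_init advances; this
 * lazily invalidates every outstanding batch. */
static unsigned long base_generation;

struct batched_entropy {
	uint64_t entropy[BATCH_WORDS];
	unsigned long generation;	/* base_generation at last refill */
	unsigned int position;
};

/* Hypothetical stand-in for _get_random_bytes(). */
static void fill_random(uint64_t *buf, size_t n)
{
	for (size_t i = 0; i < n; ++i)
		buf[i] = ((uint64_t)rand() << 32) | (uint64_t)rand();
}

static uint64_t get_batched_u64(struct batched_entropy *batch)
{
	unsigned long next_gen = base_generation;

	/* Refill when the batch is exhausted or was filled under an
	 * older generation, i.e. before the last reseed. */
	if (batch->position >= BATCH_WORDS || next_gen != batch->generation) {
		fill_random(batch->entropy, BATCH_WORDS);
		batch->position = 0;
		batch->generation = next_gen;
	}
	return batch->entropy[batch->position++];
}

int main(void)
{
	/* position = BATCH_WORDS forces a fill on first use. */
	struct batched_entropy batch = { .position = BATCH_WORDS };

	(void)get_batched_u64(&batch);	/* first call fills the batch */
	++base_generation;		/* "reseed": marks the batch stale */
	(void)get_batched_u64(&batch);	/* mismatch forces a refill */
	return 0;
}

A reseed therefore costs a single counter increment; consumers pay for
the refill only the next time they draw from a stale batch.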
Cc: Theodore Ts'o <tytso@....edu>
Reviewed-by: Eric Biggers <ebiggers@...gle.com>
Reviewed-by: Dominik Brodowski <linux@...inikbrodowski.net>
Signed-off-by: Jason A. Donenfeld <Jason@...c4.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
drivers/char/random.c | 29 ++++++++---------------------
1 file changed, 8 insertions(+), 21 deletions(-)

--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -430,8 +430,6 @@ static DEFINE_PER_CPU(struct crng, crngs

static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);

-static void invalidate_batched_entropy(void);
-
/*
* crng_fast_load() can be called by code in the interrupt service
* path. So we can't afford to dilly-dally. Returns the number of
@@ -454,7 +452,7 @@ static size_t crng_fast_load(const void
src++; crng_init_cnt++; len--; ret++;
}
if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
- invalidate_batched_entropy();
+ ++base_crng.generation;
crng_init = 1;
}
spin_unlock_irqrestore(&base_crng.lock, flags);
@@ -531,7 +529,6 @@ static void crng_reseed(void)
WRITE_ONCE(base_crng.generation, next_gen);
WRITE_ONCE(base_crng.birth, jiffies);
if (crng_init < 2) {
- invalidate_batched_entropy();
crng_init = 2;
finalize_init = true;
}
@@ -1256,8 +1253,9 @@ int __init rand_initialize(void)
mix_pool_bytes(utsname(), sizeof(*(utsname())));

extract_entropy(base_crng.key, sizeof(base_crng.key));
+ ++base_crng.generation;
+
if (arch_init && trust_cpu && crng_init < 2) {
- invalidate_batched_entropy();
crng_init = 2;
pr_notice("crng init done (trusting CPU's manufacturer)\n");
}
@@ -1607,8 +1605,6 @@ static int __init random_sysctls_init(vo
device_initcall(random_sysctls_init);
#endif /* CONFIG_SYSCTL */

-static atomic_t batch_generation = ATOMIC_INIT(0);
-
struct batched_entropy {
union {
/*
@@ -1622,8 +1618,8 @@ struct batched_entropy {
u32 entropy_u32[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u32))];
};
local_lock_t lock;
+ unsigned long generation;
unsigned int position;
- int generation;
};

/*
@@ -1643,14 +1639,14 @@ u64 get_random_u64(void)
unsigned long flags;
struct batched_entropy *batch;
static void *previous;
- int next_gen;
+ unsigned long next_gen;

warn_unseeded_randomness(&previous);

local_lock_irqsave(&batched_entropy_u64.lock, flags);
batch = raw_cpu_ptr(&batched_entropy_u64);

- next_gen = atomic_read(&batch_generation);
+ next_gen = READ_ONCE(base_crng.generation);
if (batch->position >= ARRAY_SIZE(batch->entropy_u64) ||
next_gen != batch->generation) {
_get_random_bytes(batch->entropy_u64, sizeof(batch->entropy_u64));
@@ -1677,14 +1673,14 @@ u32 get_random_u32(void)
unsigned long flags;
struct batched_entropy *batch;
static void *previous;
- int next_gen;
+ unsigned long next_gen;

warn_unseeded_randomness(&previous);

local_lock_irqsave(&batched_entropy_u32.lock, flags);
batch = raw_cpu_ptr(&batched_entropy_u32);

- next_gen = atomic_read(&batch_generation);
+ next_gen = READ_ONCE(base_crng.generation);
if (batch->position >= ARRAY_SIZE(batch->entropy_u32) ||
next_gen != batch->generation) {
_get_random_bytes(batch->entropy_u32, sizeof(batch->entropy_u32));
@@ -1700,15 +1696,6 @@ u32 get_random_u32(void)
}
EXPORT_SYMBOL(get_random_u32);

-/* It's important to invalidate all potential batched entropy that might
- * be stored before the crng is initialized, which we can do lazily by
- * bumping the generation counter.
- */
-static void invalidate_batched_entropy(void)
-{
- atomic_inc(&batch_generation);
-}
-
/**
* randomize_page - Generate a random, page aligned address
* @start: The smallest acceptable address the caller will take.