Message-Id: <20220210131304.97224-1-Jason@zx2c4.com>
Date: Thu, 10 Feb 2022 14:13:04 +0100
From: "Jason A. Donenfeld" <Jason@...c4.com>
To: linux-kernel@...r.kernel.org
Cc: "Jason A. Donenfeld" <Jason@...c4.com>,
Theodore Ts'o <tytso@....edu>,
Dominik Brodowski <linux@...inikbrodowski.net>
Subject: [PATCH v2] random: tie batched entropy generation to base_crng generation
Now that we have an explicit base_crng generation counter, we don't need
a separate one for batched entropy. Rather, we can just move the
generation forward every time we change crng_init state or update the
base_crng key.

Cc: Theodore Ts'o <tytso@mit.edu>
Reviewed-by: Dominik Brodowski <linux@dominikbrodowski.net>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
---
v2 always increments the generation after extraction, as suggested by
Dominik.
 drivers/char/random.c | 29 ++++++++---------------------
 1 file changed, 8 insertions(+), 21 deletions(-)

diff --git a/drivers/char/random.c b/drivers/char/random.c
index 5beb421ec12b..57d36f13e3a6 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -431,8 +431,6 @@ static DEFINE_PER_CPU(struct crng, crngs) = {
 
 static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
 
-static void invalidate_batched_entropy(void);
-
 /*
  * crng_fast_load() can be called by code in the interrupt service
  * path. So we can't afford to dilly-dally. Returns the number of
@@ -455,7 +453,7 @@ static size_t crng_fast_load(const void *cp, size_t len)
 		src++; crng_init_cnt++; len--; ret++;
 	}
 	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
-		invalidate_batched_entropy();
+		++base_crng.generation;
 		crng_init = 1;
 	}
 	spin_unlock_irqrestore(&base_crng.lock, flags);
@@ -530,7 +528,6 @@ static void crng_reseed(void)
 	WRITE_ONCE(base_crng.generation, next_gen);
 	base_crng.birth = jiffies;
 	if (crng_init < 2) {
-		invalidate_batched_entropy();
 		crng_init = 2;
 		finalize_init = true;
 	}
@@ -1277,8 +1274,9 @@ int __init rand_initialize(void)
 	mix_pool_bytes(utsname(), sizeof(*(utsname())));
 	extract_entropy(base_crng.key, sizeof(base_crng.key));
+	++base_crng.generation;
+
 	if (arch_init && trust_cpu && crng_init < 2) {
-		invalidate_batched_entropy();
 		crng_init = 2;
 		pr_notice("crng init done (trusting CPU's manufacturer)\n");
 	}
 
@@ -1628,8 +1626,6 @@ static int __init random_sysctls_init(void)
 device_initcall(random_sysctls_init);
 #endif /* CONFIG_SYSCTL */
 
-static atomic_t batch_generation = ATOMIC_INIT(0);
-
 struct batched_entropy {
 	union {
 		/* We make this 1.5x a ChaCha block, so that we get the
@@ -1642,8 +1638,8 @@ struct batched_entropy {
 		u32 entropy_u32[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u32))];
 	};
 	local_lock_t lock;
+	unsigned long generation;
 	unsigned int position;
-	int generation;
 };
 
 /*
@@ -1662,14 +1658,14 @@ u64 get_random_u64(void)
 	unsigned long flags;
 	struct batched_entropy *batch;
 	static void *previous;
-	int next_gen;
+	unsigned long next_gen;
 
 	warn_unseeded_randomness(&previous);
 
 	local_lock_irqsave(&batched_entropy_u64.lock, flags);
 	batch = raw_cpu_ptr(&batched_entropy_u64);
 
-	next_gen = atomic_read(&batch_generation);
+	next_gen = READ_ONCE(base_crng.generation);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0 ||
 	    next_gen != batch->generation) {
 		_get_random_bytes(batch->entropy_u64, sizeof(batch->entropy_u64));
@@ -1695,14 +1691,14 @@ u32 get_random_u32(void)
 	unsigned long flags;
 	struct batched_entropy *batch;
 	static void *previous;
-	int next_gen;
+	unsigned long next_gen;
 
 	warn_unseeded_randomness(&previous);
 
 	local_lock_irqsave(&batched_entropy_u32.lock, flags);
 	batch = raw_cpu_ptr(&batched_entropy_u32);
 
-	next_gen = atomic_read(&batch_generation);
+	next_gen = READ_ONCE(base_crng.generation);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0 ||
 	    next_gen != batch->generation) {
 		_get_random_bytes(batch->entropy_u32, sizeof(batch->entropy_u32));
@@ -1718,15 +1714,6 @@ u32 get_random_u32(void)
 }
 EXPORT_SYMBOL(get_random_u32);
 
-/* It's important to invalidate all potential batched entropy that might
- * be stored before the crng is initialized, which we can do lazily by
- * bumping the generation counter.
- */
-static void invalidate_batched_entropy(void)
-{
-	atomic_inc(&batch_generation);
-}
-
 /**
  * randomize_page - Generate a random, page aligned address
  * @start:	The smallest acceptable address the caller will take.
--
2.35.0