Message-Id: <20221024104425.16423-3-wagi@monom.org>
Date: Mon, 24 Oct 2022 12:44:18 +0200
From: Daniel Wagner <wagi@...om.org>
To: LKML <linux-kernel@...r.kernel.org>,
linux-rt-users <linux-rt-users@...r.kernel.org>,
Steven Rostedt <rostedt@...dmis.org>,
Thomas Gleixner <tglx@...utronix.de>,
Carsten Emde <C.Emde@...dl.org>,
John Kacur <jkacur@...hat.com>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Tom Zanussi <tom.zanussi@...ux.intel.com>,
Clark Williams <williams@...hat.com>,
Pavel Machek <pavel@...x.de>
Cc: Daniel Wagner <dwagner@...e.de>
Subject: [PATCH RT 2/9] random: Bring back the local_locks
From: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
v4.19.255-rt114-rc1 stable review patch.
If anyone has any objections, please let me know.
-----------
As part of the backports, the random code lost its local_lock_t type and
the whole operation was reduced to plain local_irq_{disable|enable}() calls,
simply because the older kernel did not provide those primitives.
RT as of v4.9 has a slightly different variant of local_locks.
Replace the local_irq_*() operations with the matching local_lock_irq*()
operations, which were there as part of commit
77760fd7f7ae3 ("random: remove batched entropy locking")
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Link: https://lore.kernel.org/all/20220819092446.980320-2-bigeasy@linutronix.de/
Signed-off-by: Daniel Wagner <dwagner@...e.de>
---
drivers/char/random.c | 15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 2be38780a7f7..0fd0462054bd 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -53,6 +53,7 @@
#include <linux/uaccess.h>
#include <linux/siphash.h>
#include <linux/uio.h>
+#include <linux/locallock.h>
#include <crypto/chacha20.h>
#include <crypto/blake2s.h>
#include <asm/processor.h>
@@ -229,10 +230,12 @@ static struct {
struct crng {
u8 key[CHACHA20_KEY_SIZE];
unsigned long generation;
+ struct local_irq_lock lock;
};
static DEFINE_PER_CPU(struct crng, crngs) = {
- .generation = ULONG_MAX
+ .generation = ULONG_MAX,
+ .lock.lock = __SPIN_LOCK_UNLOCKED(crngs.lock.lock),
};
/* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */
@@ -362,7 +365,7 @@ static void crng_make_state(u32 chacha_state[CHACHA20_BLOCK_SIZE / sizeof(u32)],
if (unlikely(crng_has_old_seed()))
crng_reseed();
- local_irq_save(flags);
+ local_lock_irqsave(crngs.lock, flags);
crng = raw_cpu_ptr(&crngs);
/*
@@ -387,7 +390,7 @@ static void crng_make_state(u32 chacha_state[CHACHA20_BLOCK_SIZE / sizeof(u32)],
* should wind up here immediately.
*/
crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
- local_irq_restore(flags);
+ local_unlock_irqrestore(crngs.lock, flags);
}
static void _get_random_bytes(void *buf, size_t len)
@@ -505,11 +508,13 @@ struct batch_ ##type { \
* formula of (integer_blocks + 0.5) * CHACHA20_BLOCK_SIZE. \
*/ \
type entropy[CHACHA20_BLOCK_SIZE * 3 / (2 * sizeof(type))]; \
+ struct local_irq_lock lock; \
unsigned long generation; \
unsigned int position; \
}; \
\
static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = { \
+ .lock.lock = __SPIN_LOCK_UNLOCKED(batched_entropy_ ##type.lock.lock), \
.position = UINT_MAX \
}; \
\
@@ -527,7 +532,7 @@ type get_random_ ##type(void) \
return ret; \
} \
\
- local_irq_save(flags); \
+ local_lock_irqsave(batched_entropy_ ##type.lock, flags); \
batch = raw_cpu_ptr(&batched_entropy_##type); \
\
next_gen = READ_ONCE(base_crng.generation); \
@@ -541,7 +546,7 @@ type get_random_ ##type(void) \
ret = batch->entropy[batch->position]; \
batch->entropy[batch->position] = 0; \
++batch->position; \
- local_irq_restore(flags); \
+ local_unlock_irqrestore(batched_entropy_ ##type.lock, flags); \
return ret; \
} \
EXPORT_SYMBOL(get_random_ ##type);
--
2.38.0