Message-Id: <20220801123945.43081-1-Jason@zx2c4.com>
Date: Mon, 1 Aug 2022 14:39:46 +0200
From: "Jason A. Donenfeld" <Jason@...c4.com>
To: bigeasy@...utronix.de, linux-kernel@...r.kernel.org,
Andy Shevchenko <andriy.shevchenko@...ux.intel.com>,
John Ogness <john.ogness@...utronix.de>,
Mike Galbraith <efault@....de>, Petr Mladek <pmladek@...e.com>,
Rasmus Villemoes <linux@...musvillemoes.dk>,
Sergey Senozhatsky <senozhatsky@...omium.org>,
Steven Rostedt <rostedt@...dmis.org>,
Theodore Ts'o <tytso@....edu>,
Thomas Gleixner <tglx@...utronix.de>
Cc: "Jason A. Donenfeld" <Jason@...c4.com>
Subject: [PATCH v4] lib/vsprintf: defer filling siphash key on RT
On RT, we can't call get_random_bytes() from inside the raw locks that
callers of vsprintf might hold, because get_random_bytes() takes normal
spinlocks. So on RT systems, defer the siphash key generation to a
worker.
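
The shape of that deferral, reduced to a sketch (identifiers here are
illustrative only, not the ones used in the patch below):

static siphash_key_t example_key;

static void example_fill_workfn(struct work_struct *work)
{
	/* Runs in process context, where taking spinlocks is fine. */
	get_random_bytes(&example_key, sizeof(example_key));
}
static DECLARE_WORK(example_fill_work, example_fill_workfn);

static int example_get_key(void)
{
	/* May be entered under a raw lock on RT, so don't generate the
	 * key here; kick the worker and ask the caller to retry. */
	queue_work(system_unbound_wq, &example_fill_work);
	return -EAGAIN;
}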
Also, avoid using a static branch: using static_branch_likely() to
signal that ptr_key has been filled is a bit much given that this is
not a fast path.
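
What replaces it is the usual flag-plus-barrier publication pattern,
roughly (again with illustrative identifiers):

static bool example_filled __read_mostly;

static void example_publish_key(void)
{
	get_random_bytes(&example_key, sizeof(example_key));
	/* Order the key bytes before the flag; pairs with smp_rmb(). */
	smp_wmb();
	WRITE_ONCE(example_filled, true);
}

static bool example_key_ready(void)
{
	if (!READ_ONCE(example_filled))
		return false;
	/* Pairs with the smp_wmb() above, before any read of the key. */
	smp_rmb();
	return true;
}

A READ_ONCE() of a bool plus a well-predicted branch is plenty cheap
for %p formatting.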
Cc: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Reported-by: Mike Galbraith <efault@....de>
Signed-off-by: Jason A. Donenfeld <Jason@...c4.com>
---
Sebastian - feel free to take this and tweak it as needed. Sending this
mostly as something illustrative of what the "simpler" thing would be
that I had in mind. -Jason
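
One behavioral note for reviewers: on RT this now returns -EAGAIN until
the worker has run. If I'm reading lib/vsprintf.c correctly, ptr_to_id()
already copes with that by printing a "(ptrval)" / "(____ptrval____)"
placeholder instead of the hash, roughly:

	/* Paraphrased from ptr_to_id(); not part of this patch.
	 * str is "(____ptrval____)" on 64-bit, "(ptrval)" on 32-bit. */
	ret = __ptr_to_hashval(ptr, &hashval);
	if (ret) {
		spec.field_width = 2 * sizeof(ptr);
		return error_string(buf, end, str, spec);
	}

so %p output just degrades gracefully until the key is ready.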
lib/vsprintf.c | 35 ++++++++++++++++++++---------------
1 file changed, 20 insertions(+), 15 deletions(-)
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 3c1853a9d1c0..5a67f6f65ddc 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -750,37 +750,42 @@ static int __init debug_boot_weak_hash_enable(char *str)
 }
 early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
 
-static DEFINE_STATIC_KEY_FALSE(filled_random_ptr_key);
+static bool filled_ptr_key __read_mostly;
+static siphash_key_t ptr_key __read_mostly;
 
-static void enable_ptr_key_workfn(struct work_struct *work)
+static void fill_ptr_key_workfn(struct work_struct *work)
 {
-	static_branch_enable(&filled_random_ptr_key);
+	if (READ_ONCE(filled_ptr_key))
+		return;
+	get_random_bytes(&ptr_key, sizeof(ptr_key));
+	/* Pairs with smp_rmb() before reading ptr_key. */
+	smp_wmb();
+	WRITE_ONCE(filled_ptr_key, true);
 }
 
 /* Maps a pointer to a 32 bit unique identifier. */
 static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
 {
-	static siphash_key_t ptr_key __read_mostly;
 	unsigned long hashval;
 
-	if (!static_branch_likely(&filled_random_ptr_key)) {
-		static bool filled = false;
+	if (!READ_ONCE(filled_ptr_key)) {
 		static DEFINE_SPINLOCK(filling);
-		static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
 		unsigned long flags;
 
-		if (!system_unbound_wq || !rng_is_initialized() ||
-		    !spin_trylock_irqsave(&filling, flags))
+		if (IS_ENABLED(CONFIG_PREEMPT_RT) && rng_is_initialized()) {
+			static DECLARE_WORK(fill_ptr_key_work, fill_ptr_key_workfn);
+			queue_work(system_unbound_wq, &fill_ptr_key_work);
 			return -EAGAIN;
-
-		if (!filled) {
-			get_random_bytes(&ptr_key, sizeof(ptr_key));
-			queue_work(system_unbound_wq, &enable_ptr_key_work);
-			filled = true;
 		}
+
+		if (!rng_is_initialized() || !spin_trylock_irqsave(&filling, flags))
+			return -EAGAIN;
+
+		fill_ptr_key_workfn(NULL);
 		spin_unlock_irqrestore(&filling, flags);
 	}
-
+	/* Pairs with smp_wmb() after writing ptr_key. */
+	smp_rmb();
 
 #ifdef CONFIG_64BIT
 	hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
--
2.35.1