Message-Id: <20220623164345.055051908@linuxfoundation.org>
Date: Thu, 23 Jun 2022 18:41:40 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org,
Dominik Brodowski <linux@...inikbrodowski.net>,
"Jason A. Donenfeld" <Jason@...c4.com>
Subject: [PATCH 4.14 066/237] random: use IS_ENABLED(CONFIG_NUMA) instead of ifdefs
From: "Jason A. Donenfeld" <Jason@...c4.com>
commit 7b87324112df2e1f9b395217361626362dcfb9fb upstream.
Rather than an awkward combination of ifdefs and __maybe_unused, we can
ensure more source gets parsed, regardless of the configuration, by
using IS_ENABLED for the CONFIG_NUMA conditional code. This makes things
cleaner and easier to follow.
I've confirmed that on !CONFIG_NUMA, we don't wind up with excess code
by accident; the generated object file is the same.
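For readers unfamiliar with the idiom, a minimal standalone sketch is below.
MY_CONFIG_NUMA, my_is_enabled() and pick_pool() are made-up names for
illustration only, not the kernel's macro or the driver's code (the real
IS_ENABLED() lives in include/linux/kconfig.h). The point it demonstrates:
the branch guarded by a constant 0 is still parsed and type-checked, then
discarded by the optimizer, so the !CONFIG_NUMA build carries no extra code.

	/*
	 * Illustrative sketch of the IS_ENABLED() idea, not kernel code.
	 * Flip MY_CONFIG_NUMA to 0 to mimic a !CONFIG_NUMA build; the
	 * per-node branch is still compiled but emits no object code.
	 */
	#include <stdio.h>

	#define MY_CONFIG_NUMA 1		/* stand-in for CONFIG_NUMA=y */
	#define my_is_enabled(opt) (opt)	/* simplified stand-in for IS_ENABLED() */

	static int pick_pool(void)
	{
		if (my_is_enabled(MY_CONFIG_NUMA))
			return 1;	/* per-node path: parsed either way, emitted only when 1 */
		return 0;		/* fallback path */
	}

	int main(void)
	{
		printf("pool = %d\n", pick_pool());
		return 0;
	}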
Reviewed-by: Dominik Brodowski <linux@...inikbrodowski.net>
Signed-off-by: Jason A. Donenfeld <Jason@...c4.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
drivers/char/random.c | 32 ++++++++++++--------------------
1 file changed, 12 insertions(+), 20 deletions(-)
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -761,7 +761,6 @@ static int credit_entropy_bits_safe(stru
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
-#ifdef CONFIG_NUMA
/*
* Hack to deal with crazy userspace progams when they are all trying
* to access /dev/urandom in parallel. The programs are almost
@@ -769,7 +768,6 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init
* their brain damage.
*/
static struct crng_state **crng_node_pool __read_mostly;
-#endif
static void invalidate_batched_entropy(void);
@@ -816,7 +814,7 @@ static bool __init crng_init_try_arch_ea
return arch_init;
}
-static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
+static void crng_initialize_secondary(struct crng_state *crng)
{
memcpy(&crng->state[0], "expand 32-byte k", 16);
_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
@@ -867,7 +865,6 @@ static void crng_finalize_init(struct cr
}
}
-#ifdef CONFIG_NUMA
static void do_numa_crng_init(struct work_struct *work)
{
int i;
@@ -894,29 +891,24 @@ static DECLARE_WORK(numa_crng_init_work,
static void numa_crng_init(void)
{
- schedule_work(&numa_crng_init_work);
+ if (IS_ENABLED(CONFIG_NUMA))
+ schedule_work(&numa_crng_init_work);
}
static struct crng_state *select_crng(void)
{
- struct crng_state **pool;
- int nid = numa_node_id();
-
- /* pairs with cmpxchg_release() in do_numa_crng_init() */
- pool = READ_ONCE(crng_node_pool);
- if (pool && pool[nid])
- return pool[nid];
-
- return &primary_crng;
-}
-#else
-static void numa_crng_init(void) {}
+ if (IS_ENABLED(CONFIG_NUMA)) {
+ struct crng_state **pool;
+ int nid = numa_node_id();
+
+ /* pairs with cmpxchg_release() in do_numa_crng_init() */
+ pool = READ_ONCE(crng_node_pool);
+ if (pool && pool[nid])
+ return pool[nid];
+ }
-static struct crng_state *select_crng(void)
-{
return &primary_crng;
}
-#endif
/*
* crng_fast_load() can be called by code in the interrupt service
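As an aside on the "pairs with cmpxchg_release()" comment kept in
select_crng(): below is a rough standalone C11 sketch of the
publish-then-consume pattern it relies on. node_pool, publish_pool() and
select_pool() are illustrative names, and the sketch uses an acquire load
where the driver gets away with READ_ONCE() plus the address dependency, so
it is deliberately stronger than, not identical to, the kernel code.

	/*
	 * Illustrative C11 sketch, not the driver: the initializer fully
	 * builds the pool and publishes the pointer with release ordering;
	 * readers either see the published pool or fall back to a static
	 * "primary" instance.
	 */
	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct pool { int ready; };

	static _Atomic(struct pool *) node_pool;	/* stand-in for crng_node_pool */
	static struct pool primary = { .ready = 1 };	/* stand-in for primary_crng */

	static void publish_pool(void)
	{
		struct pool *p = malloc(sizeof(*p));
		struct pool *expected = NULL;

		if (!p)
			return;
		p->ready = 1;
		/* release: the store to p->ready is ordered before the pointer publish */
		if (!atomic_compare_exchange_strong_explicit(&node_pool, &expected, p,
							     memory_order_release,
							     memory_order_relaxed))
			free(p);	/* lost the race; drop our copy */
	}

	static struct pool *select_pool(void)
	{
		/* acquire load; the driver uses READ_ONCE() and the data dependency */
		struct pool *p = atomic_load_explicit(&node_pool, memory_order_acquire);

		return p ? p : &primary;
	}

	int main(void)
	{
		publish_pool();
		printf("ready = %d\n", select_pool()->ready);
		return 0;
	}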