Message-Id: <20200921075857.4424-10-nstange@suse.de>
Date: Mon, 21 Sep 2020 09:58:25 +0200
From: Nicolai Stange <nstange@...e.de>
To: "Theodore Y. Ts'o" <tytso@....edu>
Cc: linux-crypto@...r.kernel.org, LKML <linux-kernel@...r.kernel.org>,
Arnd Bergmann <arnd@...db.de>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
"Eric W. Biederman" <ebiederm@...ssion.com>,
"Alexander E. Patrakov" <patrakov@...il.com>,
"Ahmed S. Darwish" <darwish.07@...il.com>,
Willy Tarreau <w@....eu>,
Matthew Garrett <mjg59@...f.ucam.org>,
Vito Caputo <vcaputo@...garu.com>,
Andreas Dilger <adilger.kernel@...ger.ca>,
Jan Kara <jack@...e.cz>, Ray Strode <rstrode@...hat.com>,
William Jon McCann <mccann@....edu>,
zhangjs <zachary@...shancloud.com>,
Andy Lutomirski <luto@...nel.org>,
Florian Weimer <fweimer@...hat.com>,
Lennart Poettering <mzxreary@...inter.de>,
Peter Matthias <matthias.peter@....bund.de>,
Marcelo Henrique Cerri <marcelo.cerri@...onical.com>,
Roman Drahtmueller <draht@...altsekun.de>,
Neil Horman <nhorman@...hat.com>,
Randy Dunlap <rdunlap@...radead.org>,
Julia Lawall <julia.lawall@...ia.fr>,
Dan Carpenter <dan.carpenter@...cle.com>,
Andy Lavr <andy.lavr@...il.com>,
Eric Biggers <ebiggers@...nel.org>,
"Jason A. Donenfeld" <Jason@...c4.com>,
Stephan Müller <smueller@...onox.de>,
Torsten Duwe <duwe@...e.de>, Petr Tesarik <ptesarik@...e.cz>,
Nicolai Stange <nstange@...e.de>
Subject: [RFC PATCH 09/41] random: protect ->entropy_count with the pool spinlock
Currently, all updates to ->entropy_count are synchronized by means of
cmpxchg-retry loops found in credit_entropy_bits(),
__credit_entropy_bits_fast() and account().
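For reference, these sites all follow the same lockless pattern, sketched
here for the credit case (simplified; declarations omitted, nfrac stands
for nbits << ENTROPY_SHIFT):

retry:
	orig = READ_ONCE(r->entropy_count);
	/* Compute the new value based on the snapshotted orig. */
	entropy_count = orig + pool_entropy_delta(r, orig, nfrac, fast);
	/* Publish only if nobody else updated ->entropy_count meanwhile. */
	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
		goto retry;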
However, all but one of the __credit_entropy_bits_fast() call sites
already grab the pool ->lock and it would be nice if the potentially
costly cmpxchg could be avoided in these performance-critical paths. In
addition to that, future patches will introduce new fields to struct
entropy_store which will require some kind of synchronization with
->entropy_count updates from said producer paths as well.
Protect ->entropy_count with the pool ->lock.
- Make callers of __credit_entropy_bits_fast() invoke it with the
pool ->lock held. Extend existing critical sections where possible.
Drop the cmpxchg-retry loop in __credit_entropy_bits_fast() in favor of
a plain assignment.
- Retain the retry loop in credit_entropy_bits(): the potentially
expensive pool_entropy_delta() should not be called under the lock, so
as not to unnecessarily block contenders. In order to continue to
synchronize with __credit_entropy_bits_fast() and account(), the
cmpxchg gets replaced by a plain comparison + store with the ->lock
held, as sketched below.
- Make account() grab the ->lock and drop the cmpxchg-retry loop in favor
of a plain assignment.
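For illustration, the resulting update sequence in credit_entropy_bits()
will look roughly like this (a sketch only, c.f. the diff below for the
exact code; declarations omitted, nfrac again stands for
nbits << ENTROPY_SHIFT):

retry:
	orig = READ_ONCE(r->entropy_count);
	/* Potentially expensive, hence done outside of the lock. */
	entropy_count = orig + pool_entropy_delta(r, orig, nfrac, false);
	spin_lock_irqsave(&r->lock, flags);
	if (r->entropy_count != orig) {
		/* Lost a race, redo the computation with the new value. */
		spin_unlock_irqrestore(&r->lock, flags);
		goto retry;
	}
	WRITE_ONCE(r->entropy_count, entropy_count);
	spin_unlock_irqrestore(&r->lock, flags);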
Signed-off-by: Nicolai Stange <nstange@...e.de>
---
drivers/char/random.c | 44 +++++++++++++++++++++++++++++--------------
1 file changed, 30 insertions(+), 14 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d9e4dd27d45d..9f87332b158f 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -718,7 +718,7 @@ static unsigned int pool_entropy_delta(struct entropy_store *r,
* Credit the entropy store with n bits of entropy.
* To be used from hot paths when it is either known that nbits is
* smaller than one half of the pool size or losing anything beyond that
- * doesn't matter.
+ * doesn't matter. Must be called with r->lock being held.
*/
static bool __credit_entropy_bits_fast(struct entropy_store *r, int nbits)
{
@@ -727,13 +727,11 @@ static bool __credit_entropy_bits_fast(struct entropy_store *r, int nbits)
if (!nbits)
return false;
-retry:
- orig = READ_ONCE(r->entropy_count);
+ orig = r->entropy_count;
entropy_count = orig + pool_entropy_delta(r, orig,
nbits << ENTROPY_SHIFT,
true);
- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
- goto retry;
+ WRITE_ONCE(r->entropy_count, entropy_count);
trace_credit_entropy_bits(r->name, nbits,
entropy_count >> ENTROPY_SHIFT, _RET_IP_);
@@ -755,17 +753,28 @@ static bool __credit_entropy_bits_fast(struct entropy_store *r, int nbits)
static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
int entropy_count, orig;
+ unsigned long flags;
if (!nbits)
return;
retry:
+ /*
+ * Don't run the potentially expensive pool_entropy_delta()
+ * calculations under the spinlock. Instead retry until
+ * ->entropy_count becomes stable.
+ */
orig = READ_ONCE(r->entropy_count);
entropy_count = orig + pool_entropy_delta(r, orig,
nbits << ENTROPY_SHIFT,
false);
- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+ spin_lock_irqsave(&r->lock, flags);
+ if (r->entropy_count != orig) {
+ spin_unlock_irqrestore(&r->lock, flags);
goto retry;
+ }
+ WRITE_ONCE(r->entropy_count, entropy_count);
+ spin_unlock_irqrestore(&r->lock, flags);
trace_credit_entropy_bits(r->name, nbits,
entropy_count >> ENTROPY_SHIFT, _RET_IP_);
@@ -1203,12 +1212,11 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
} sample;
long delta, delta2, delta3;
bool reseed;
+ unsigned long flags;
sample.jiffies = jiffies;
sample.cycles = random_get_entropy();
sample.num = num;
- r = &input_pool;
- mix_pool_bytes(r, &sample, sizeof(sample));
/*
* Calculate number of bits of randomness we probably added.
@@ -1235,12 +1243,16 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
if (delta > delta3)
delta = delta3;
+ r = &input_pool;
+ spin_lock_irqsave(&r->lock, flags);
+ __mix_pool_bytes(r, &sample, sizeof(sample));
/*
* delta is now minimum absolute delta.
* Round down by 1 bit on general principles,
* and limit entropy estimate to 12 bits.
*/
reseed = __credit_entropy_bits_fast(r, min_t(int, fls(delta>>1), 11));
+ spin_unlock_irqrestore(&r->lock, flags);
if (reseed)
crng_reseed(&primary_crng, r);
}
@@ -1358,12 +1370,12 @@ void add_interrupt_randomness(int irq, int irq_flags)
__mix_pool_bytes(r, &seed, sizeof(seed));
credit = 1;
}
- spin_unlock(&r->lock);
fast_pool->count = 0;
/* award one bit for the contents of the fast pool */
reseed = __credit_entropy_bits_fast(r, credit + 1);
+ spin_unlock(&r->lock);
if (reseed)
crng_reseed(&primary_crng, r);
}
@@ -1393,14 +1405,15 @@ EXPORT_SYMBOL_GPL(add_disk_randomness);
*/
static size_t account(struct entropy_store *r, size_t nbytes, int min)
{
- int entropy_count, orig, have_bytes;
+ int entropy_count, have_bytes;
size_t ibytes, nfrac;
+ unsigned long flags;
BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
+ spin_lock_irqsave(&r->lock, flags);
/* Can we pull enough? */
-retry:
- entropy_count = orig = READ_ONCE(r->entropy_count);
+ entropy_count = r->entropy_count;
ibytes = nbytes;
/* never pull more than available */
have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
@@ -1420,8 +1433,8 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min)
else
entropy_count = 0;
- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
- goto retry;
+ WRITE_ONCE(r->entropy_count, entropy_count);
+ spin_unlock_irqrestore(&r->lock, flags);
trace_debit_entropy(r->name, 8 * ibytes);
if (ibytes && ENTROPY_BITS(r) < random_write_wakeup_bits) {
@@ -1639,8 +1652,11 @@ EXPORT_SYMBOL(get_random_bytes);
static void entropy_timer(struct timer_list *t)
{
bool reseed;
+ unsigned long flags;
+ spin_lock_irqsave(&input_pool.lock, flags);
reseed = __credit_entropy_bits_fast(&input_pool, 1);
+ spin_unlock_irqrestore(&input_pool.lock, flags);
if (reseed)
crng_reseed(&primary_crng, &input_pool);
}
--
2.26.2