Message-Id: <E1Z2Gd9-0002mc-QC@gondolin.me.apana.org.au>
Date: Tue, 09 Jun 2015 18:19:39 +0800
From: Herbert Xu <herbert@...dor.apana.org.au>
To: Stephan Mueller <smueller@...onox.de>, Ted Tso <tytso@....edu>,
andreas.steffen@...ongswan.org, sandyinchina@...il.com,
linux-crypto@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [PATCH 1/3] random: Add callback API for random pool readiness
The get_blocking_random_bytes API is broken because the wait can
be arbitrarily long (potentially forever), so there is no safe way
of calling it from within the kernel.
This patch replaces it with a callback API. The callback may be
invoked from interrupt context, so users need to schedule their own
work item if they have to do anything that cannot run there.
In addition to adding callbacks, they can also be removed again:
otherwise user-space would have a way to allocate unbounded kernel
memory (by opening algif_rng descriptors and then closing them).
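
For illustration, here is a minimal sketch of a hypothetical consumer
module (not part of this patch): the callback only schedules a work
item because it may run in interrupt context, and the exit path removes
the callback in case the pool never became ready.

        #include <linux/module.h>
        #include <linux/random.h>
        #include <linux/workqueue.h>

        static void example_seed_work(struct work_struct *work)
        {
                u8 seed[16];

                /* Safe to sleep here; the nonblocking pool is initialised. */
                get_random_bytes(seed, sizeof(seed));
                /* ... feed seed[] into the consumer's own generator ... */
        }

        static DECLARE_WORK(example_work, example_seed_work);

        static void example_random_ready(struct random_ready_callback *rdy)
        {
                /* Possibly in interrupt context: defer the real work. */
                schedule_work(&example_work);
        }

        static struct random_ready_callback example_rdy = {
                .list  = LIST_HEAD_INIT(example_rdy.list),
                .func  = example_random_ready,
                .owner = THIS_MODULE,
        };

        static int __init example_init(void)
        {
                int err = add_random_ready_callback(&example_rdy);

                if (err == -EALREADY) {
                        /* Pool was already seeded; no need to wait. */
                        schedule_work(&example_work);
                        err = 0;
                }
                return err;
        }

        static void __exit example_exit(void)
        {
                /* No-op if the callback already fired and removed itself. */
                del_random_ready_callback(&example_rdy);
                cancel_work_sync(&example_work);
        }

        module_init(example_init);
        module_exit(example_exit);
        MODULE_LICENSE("GPL");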
Signed-off-by: Herbert Xu <herbert@...dor.apana.org.au>
---
drivers/char/random.c | 78 +++++++++++++++++++++++++++++++++++++++++++++++++
include/linux/random.h | 9 +++++
2 files changed, 87 insertions(+)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 159d070..a1576ed 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -409,6 +409,9 @@ static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static DECLARE_WAIT_QUEUE_HEAD(urandom_init_wait);
static struct fasync_struct *fasync;
+static DEFINE_SPINLOCK(random_ready_list_lock);
+static LIST_HEAD(random_ready_list);
+
/**********************************************************************
*
* OS independent entropy store. Here are the functions which handle
@@ -589,6 +592,22 @@ static void fast_mix(struct fast_pool *f)
f->count++;
}
+static void process_random_ready_list(void)
+{
+ unsigned long flags;
+ struct random_ready_callback *rdy, *tmp;
+
+ spin_lock_irqsave(&random_ready_list_lock, flags);
+ list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
+ struct module *owner = rdy->owner;
+
+ list_del_init(&rdy->list);
+ rdy->func(rdy);
+ module_put(owner);
+ }
+ spin_unlock_irqrestore(&random_ready_list_lock, flags);
+}
+
/*
* Credit (or debit) the entropy store with n bits of entropy.
* Use credit_entropy_bits_safe() if the value comes from userspace
@@ -660,6 +679,7 @@ retry:
r->entropy_total = 0;
if (r == &nonblocking_pool) {
prandom_reseed_late();
+ process_random_ready_list();
wake_up_all(&urandom_init_wait);
pr_notice("random: %s pool is initialized\n", r->name);
}
@@ -1257,6 +1277,64 @@ void get_blocking_random_bytes(void *buf, int nbytes)
EXPORT_SYMBOL(get_blocking_random_bytes);
/*
+ * Add a callback function that will be invoked when the nonblocking
+ * pool is initialised.
+ *
+ * returns: 0 if callback is successfully added
+ * -EALREADY if pool is already initialised (callback not called)
+ * -ENOENT if module for callback is not alive
+ */
+int add_random_ready_callback(struct random_ready_callback *rdy)
+{
+ struct module *owner;
+ unsigned long flags;
+ int err = -EALREADY;
+
+ if (likely(nonblocking_pool.initialized))
+ return err;
+
+ owner = rdy->owner;
+ if (!try_module_get(owner))
+ return -ENOENT;
+
+ spin_lock_irqsave(&random_ready_list_lock, flags);
+ if (nonblocking_pool.initialized)
+ goto out;
+
+ owner = NULL;
+
+ list_add(&rdy->list, &random_ready_list);
+ err = 0;
+
+out:
+ spin_unlock_irqrestore(&random_ready_list_lock, flags);
+
+ module_put(owner);
+
+ return err;
+}
+EXPORT_SYMBOL(add_random_ready_callback);
+
+/*
+ * Delete a previously registered readiness callback function.
+ */
+void del_random_ready_callback(struct random_ready_callback *rdy)
+{
+ unsigned long flags;
+ struct module *owner = NULL;
+
+ spin_lock_irqsave(&random_ready_list_lock, flags);
+ if (!list_empty(&rdy->list)) {
+ list_del_init(&rdy->list);
+ owner = rdy->owner;
+ }
+ spin_unlock_irqrestore(&random_ready_list_lock, flags);
+
+ module_put(owner);
+}
+EXPORT_SYMBOL(del_random_ready_callback);
+
+/*
* This function will use the architecture-specific hardware random
* number generator if it is available. The arch-specific hw RNG will
* almost certainly be faster than what we can do in software, but it
diff --git a/include/linux/random.h b/include/linux/random.h
index 796267d..30e2aca 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -6,8 +6,15 @@
#ifndef _LINUX_RANDOM_H
#define _LINUX_RANDOM_H
+#include <linux/list.h>
#include <uapi/linux/random.h>
+struct random_ready_callback {
+ struct list_head list;
+ void (*func)(struct random_ready_callback *rdy);
+ struct module *owner;
+};
+
extern void add_device_randomness(const void *, unsigned int);
extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value);
@@ -15,6 +22,8 @@ extern void add_interrupt_randomness(int irq, int irq_flags);
extern void get_random_bytes(void *buf, int nbytes);
extern void get_blocking_random_bytes(void *buf, int nbytes);
+extern int add_random_ready_callback(struct random_ready_callback *rdy);
+extern void del_random_ready_callback(struct random_ready_callback *rdy);
extern void get_random_bytes_arch(void *buf, int nbytes);
void generate_random_uuid(unsigned char uuid_out[16]);
extern int random_int_secret_init(void);
--