Message-Id: <1417839402-13490-2-git-send-email-akong@redhat.com>
Date: Sat, 6 Dec 2014 12:16:37 +0800
From: Amos Kong <akong@...hat.com>
To: virtualization@...ts.linux-foundation.org, rusty@...tcorp.com.au
Cc: kvm@...r.kernel.org, m@...s.ch, mpm@...enic.com,
amit.shah@...hat.com, linux-kernel@...r.kernel.org,
herbert@...dor.apana.org.au
Subject: [PATCH v5 1/6] hw_random: place mutex around read functions and buffers.
From: Rusty Russell <rusty@...tcorp.com.au>
There's currently a big lock around everything, and it means that we
can't query sysfs (e.g. /sys/devices/virtual/misc/hw_random/rng_current)
while the rng is reading. This is a real problem when the rng is slow,
or blocked (e.g. virtio_rng with qemu's default /dev/random backend).
This patch doesn't fix that by itself (it leaves the current lock
untouched); it just adds a lock to protect the read function and the
static buffers, in preparation for the transition.
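
To picture the locking split this series is working toward, here is a
small user-space sketch, with pthreads standing in for kernel mutexes
(all names below are illustrative, not taken from core.c). Once later
patches stop holding the big lock across reads, a state query such as
the sysfs read above contends only on the state lock, while a slow
read holds only the reading lock:

#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static pthread_mutex_t state_lock   = PTHREAD_MUTEX_INITIALIZER; /* plays rng_mutex */
static pthread_mutex_t reading_lock = PTHREAD_MUTEX_INITIALIZER; /* plays reading_mutex */

static char current_name[32] = "virtio_rng";  /* protected by state_lock */
static unsigned char rngbuf[16];              /* protected by reading_lock */

/* Read path: may block for a long time, so it takes only reading_lock. */
static void *reader(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&reading_lock);
        sleep(3);                 /* stands in for a slow/blocked backend */
        memset(rngbuf, 0xa5, sizeof(rngbuf));
        pthread_mutex_unlock(&reading_lock);
        return NULL;
}

/* sysfs-style query: takes only state_lock, so it completes immediately
 * even while the reader above is stuck inside its backend. */
static void *query(void *arg)
{
        char name[32];

        (void)arg;
        pthread_mutex_lock(&state_lock);
        snprintf(name, sizeof(name), "%s", current_name);
        pthread_mutex_unlock(&state_lock);
        printf("current rng: %s\n", name);
        return NULL;
}

int main(void)
{
        pthread_t r, q;

        pthread_create(&r, NULL, reader, NULL);
        sleep(1);                 /* let the reader grab reading_lock */
        pthread_create(&q, NULL, query, NULL);
        pthread_join(q, NULL);    /* returns at once; no read lock needed */
        pthread_join(r, NULL);
        return 0;
}

Built with "cc -pthread", the query thread prints immediately even
though the reader spends seconds holding reading_lock, which is the
behaviour the sysfs path gains once the big lock stops covering reads.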
Signed-off-by: Rusty Russell <rusty@...tcorp.com.au>
---
drivers/char/hw_random/core.c | 20 +++++++++++++++++---
1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index aa30a25..b1b6042 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -53,7 +53,10 @@
static struct hwrng *current_rng;
static struct task_struct *hwrng_fill;
static LIST_HEAD(rng_list);
+/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
+/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
+static DEFINE_MUTEX(reading_mutex);
static int data_avail;
static u8 *rng_buffer, *rng_fillbuf;
static unsigned short current_quality;
@@ -81,7 +84,9 @@ static void add_early_randomness(struct hwrng *rng)
unsigned char bytes[16];
int bytes_read;
+ mutex_lock(&reading_mutex);
bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
+ mutex_unlock(&reading_mutex);
if (bytes_read > 0)
add_device_randomness(bytes, bytes_read);
}
@@ -128,6 +133,7 @@ static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
int wait) {
int present;
+ BUG_ON(!mutex_is_locked(&reading_mutex));
if (rng->read)
return rng->read(rng, (void *)buffer, size, wait);
@@ -160,13 +166,14 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
goto out_unlock;
}
+ mutex_lock(&reading_mutex);
if (!data_avail) {
bytes_read = rng_get_data(current_rng, rng_buffer,
rng_buffer_size(),
!(filp->f_flags & O_NONBLOCK));
if (bytes_read < 0) {
err = bytes_read;
- goto out_unlock;
+ goto out_unlock_reading;
}
data_avail = bytes_read;
}
@@ -174,7 +181,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
if (!data_avail) {
if (filp->f_flags & O_NONBLOCK) {
err = -EAGAIN;
- goto out_unlock;
+ goto out_unlock_reading;
}
} else {
len = data_avail;
@@ -186,7 +193,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
if (copy_to_user(buf + ret, rng_buffer + data_avail,
len)) {
err = -EFAULT;
- goto out_unlock;
+ goto out_unlock_reading;
}
size -= len;
@@ -194,6 +201,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
}
mutex_unlock(&rng_mutex);
+ mutex_unlock(&reading_mutex);
if (need_resched())
schedule_timeout_interruptible(1);
@@ -208,6 +216,9 @@ out:
out_unlock:
mutex_unlock(&rng_mutex);
goto out;
+out_unlock_reading:
+ mutex_unlock(&reading_mutex);
+ goto out_unlock;
}
@@ -348,13 +359,16 @@ static int hwrng_fillfn(void *unused)
while (!kthread_should_stop()) {
if (!current_rng)
break;
+ mutex_lock(&reading_mutex);
rc = rng_get_data(current_rng, rng_fillbuf,
rng_buffer_size(), 1);
+ mutex_unlock(&reading_mutex);
if (rc <= 0) {
pr_warn("hwrng: no data available\n");
msleep_interruptible(10000);
continue;
}
+ /* Outside lock, sure, but y'know: randomness. */
add_hwgenerator_randomness((void *)rng_fillbuf, rc,
rc * current_quality * 8 >> 10);
}
--
1.9.3
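
A side note on the BUG_ON(!mutex_is_locked(&reading_mutex)) added to
rng_get_data(): it turns the locking contract into an assertion (the
in-kernel lockdep_assert_held() expresses a similar idea). A rough,
runnable user-space analogue using an error-checking pthread mutex is
sketched below; the names are made up for illustration, and this
variant is stricter than mutex_is_locked() since it checks that the
caller itself owns the lock (and would block if another thread held it):

#include <assert.h>
#include <errno.h>
#include <pthread.h>

static pthread_mutex_t reading_lock;

static void assert_held(pthread_mutex_t *m)
{
        /* An owner relocking a PTHREAD_MUTEX_ERRORCHECK mutex gets
         * EDEADLK; any other result means we did not hold the lock. */
        int rc = pthread_mutex_lock(m);

        assert(rc == EDEADLK);
        (void)rc;
}

/* Mirrors the contract documented by the BUG_ON() in rng_get_data(). */
static void get_data(void)
{
        assert_held(&reading_lock);
        /* ... touch the shared buffers safely here ... */
}

int main(void)
{
        pthread_mutexattr_t attr;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
        pthread_mutex_init(&reading_lock, &attr);

        pthread_mutex_lock(&reading_lock);
        get_data();               /* assertion passes: lock is held */
        pthread_mutex_unlock(&reading_lock);
        return 0;
}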
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/