Message-ID: <lsq.1518323471.147037335@decadent.org.uk>
Date: Sun, 11 Feb 2018 04:31:11 +0000
From: Ben Hutchings <ben@...adent.org.uk>
To: linux-kernel@...r.kernel.org, stable@...r.kernel.org
CC: akpm@...ux-foundation.org, "Dmitry Vyukov" <dvyukov@...gle.com>,
"Jens Axboe" <axboe@...nel.dk>
Subject: [PATCH 3.16 076/136] blktrace: fix unlocked access to
init/start-stop/teardown
3.16.54-rc1 review patch. If anyone has any objections, please let me know.

------------------

From: Jens Axboe <axboe@...nel.dk>

commit 1f2cac107c591c24b60b115d6050adc213d10fc0 upstream.
sg.c calls into the blktrace functions without holding the proper queue
mutex for doing setup, start/stop, or teardown.

Add internal unlocked variants, and export the ones that do the proper
locking.
Fixes: 6da127ad0918 ("blktrace: Add blktrace ioctls to SCSI generic devices")
Tested-by: Dmitry Vyukov <dvyukov@...gle.com>
Signed-off-by: Jens Axboe <axboe@...nel.dk>
Signed-off-by: Ben Hutchings <ben@...adent.org.uk>
---
kernel/trace/blktrace.c | 58 ++++++++++++++++++++++++++++++++++++++++---------
1 file changed, 48 insertions(+), 10 deletions(-)
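The shape of the change is the usual one: the existing function body becomes
a static, unlocked __-prefixed helper, and the exported symbol becomes a thin
wrapper that takes q->blk_trace_mutex around the call. As a rough,
self-contained userspace sketch of that pattern only (a pthread mutex stands
in for blk_trace_mutex; struct queue, trace_startstop() and the other names
here are made up for illustration and are not kernel code):

	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>

	/* Stand-in for struct request_queue: some state plus the mutex
	 * guarding it (here playing the role of blk_trace_mutex). */
	struct queue {
		pthread_mutex_t trace_mutex;
		int trace_running;
	};

	/* Unlocked helper: the caller must already hold q->trace_mutex
	 * (same role as the __blk_trace_*() helpers in the patch). */
	static int __trace_startstop(struct queue *q, int start)
	{
		if (q->trace_running == start)
			return -EINVAL;
		q->trace_running = start;
		return 0;
	}

	/* Locking wrapper for external callers (same role as the
	 * exported blk_trace_*() functions in the patch). */
	static int trace_startstop(struct queue *q, int start)
	{
		int ret;

		pthread_mutex_lock(&q->trace_mutex);
		ret = __trace_startstop(q, start);
		pthread_mutex_unlock(&q->trace_mutex);

		return ret;
	}

	int main(void)
	{
		struct queue q = { PTHREAD_MUTEX_INITIALIZER, 0 };

		printf("start: %d\n", trace_startstop(&q, 1));       /* 0 */
		printf("start again: %d\n", trace_startstop(&q, 1)); /* -EINVAL */
		printf("stop: %d\n", trace_startstop(&q, 0));        /* 0 */
		return 0;
	}

In the diff below, callers that already run under the mutex use the unlocked
helpers directly: blk_trace_shutdown() now takes blk_trace_mutex once and
calls __blk_trace_startstop()/__blk_trace_remove(), and blk_trace_ioctl()
(whose locking sits outside these hunks) is switched to the __ variants as
well, so the mutex is never taken twice on those paths.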
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -307,7 +307,7 @@ static void blk_trace_cleanup(struct blk
blk_unregister_tracepoints();
}
-int blk_trace_remove(struct request_queue *q)
+static int __blk_trace_remove(struct request_queue *q)
{
struct blk_trace *bt;
@@ -320,6 +320,17 @@ int blk_trace_remove(struct request_queu
return 0;
}
+
+int blk_trace_remove(struct request_queue *q)
+{
+ int ret;
+
+ mutex_lock(&q->blk_trace_mutex);
+ ret = __blk_trace_remove(q);
+ mutex_unlock(&q->blk_trace_mutex);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(blk_trace_remove);
static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
@@ -536,9 +547,8 @@ err:
return ret;
}
-int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
- struct block_device *bdev,
- char __user *arg)
+static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+ struct block_device *bdev, char __user *arg)
{
struct blk_user_trace_setup buts;
int ret;
@@ -557,6 +567,19 @@ int blk_trace_setup(struct request_queue
}
return 0;
}
+
+int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+ struct block_device *bdev,
+ char __user *arg)
+{
+ int ret;
+
+ mutex_lock(&q->blk_trace_mutex);
+ ret = __blk_trace_setup(q, name, dev, bdev, arg);
+ mutex_unlock(&q->blk_trace_mutex);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(blk_trace_setup);
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
@@ -593,7 +616,7 @@ static int compat_blk_trace_setup(struct
}
#endif
-int blk_trace_startstop(struct request_queue *q, int start)
+static int __blk_trace_startstop(struct request_queue *q, int start)
{
int ret;
struct blk_trace *bt = q->blk_trace;
@@ -632,6 +655,17 @@ int blk_trace_startstop(struct request_q
return ret;
}
+
+int blk_trace_startstop(struct request_queue *q, int start)
+{
+ int ret;
+
+ mutex_lock(&q->blk_trace_mutex);
+ ret = __blk_trace_startstop(q, start);
+ mutex_unlock(&q->blk_trace_mutex);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(blk_trace_startstop);
/*
@@ -662,7 +696,7 @@ int blk_trace_ioctl(struct block_device
switch (cmd) {
case BLKTRACESETUP:
bdevname(bdev, b);
- ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
+ ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
break;
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
case BLKTRACESETUP32:
@@ -673,10 +707,10 @@ int blk_trace_ioctl(struct block_device
case BLKTRACESTART:
start = 1;
case BLKTRACESTOP:
- ret = blk_trace_startstop(q, start);
+ ret = __blk_trace_startstop(q, start);
break;
case BLKTRACETEARDOWN:
- ret = blk_trace_remove(q);
+ ret = __blk_trace_remove(q);
break;
default:
ret = -ENOTTY;
@@ -694,10 +728,14 @@ int blk_trace_ioctl(struct block_device
**/
void blk_trace_shutdown(struct request_queue *q)
{
+ mutex_lock(&q->blk_trace_mutex);
+
if (q->blk_trace) {
- blk_trace_startstop(q, 0);
- blk_trace_remove(q);
+ __blk_trace_startstop(q, 0);
+ __blk_trace_remove(q);
}
+
+ mutex_unlock(&q->blk_trace_mutex);
}
/*