Message-Id: <20190121134917.361471897@linuxfoundation.org>
Date: Mon, 21 Jan 2019 14:49:17 +0100
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Jan Kara <jack@...e.cz>,
Jens Axboe <axboe@...nel.dk>
Subject: [PATCH 4.19 85/99] loop: Split setting of lo_state from loop_clr_fd
4.19-stable review patch. If anyone has any objections, please let me know.
------------------
From: Jan Kara <jack@...e.cz>
commit a2505b799a496b7b84d9a4a14ec870ff9e42e11b upstream.
Move the setting of lo_state to Lo_rundown out into the callers. That will
allow us to unlock loop_ctl_mutex while the loop device is protected
from other changes by its special state.
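[Editorial note, not part of the upstream commit message: as a rough
illustration of the pattern being introduced here, the caller validates the
state and flips the device into a transitional "rundown" state while holding
the lock, and only then hands off to the teardown helper, which can drop the
lock because the special state keeps other paths away. Below is a minimal
userspace sketch of that shape, using pthreads as a stand-in for
loop_ctl_mutex; all identifiers (struct dev, DEV_RUNDOWN, __teardown, clr)
are invented for the example and are not the kernel's.

/* Sketch only: userspace analogue of "callers set the rundown state,
 * a helper does the teardown". Build with: cc -pthread sketch.c */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

enum dev_state { DEV_UNBOUND, DEV_BOUND, DEV_RUNDOWN };

struct dev {
	pthread_mutex_t ctl_mutex;	/* plays the role of loop_ctl_mutex */
	enum dev_state state;
};

/* Helper: only runs once the caller has already set DEV_RUNDOWN, so it
 * may drop ctl_mutex around the slow teardown work. */
static int __teardown(struct dev *d)
{
	if (d->state != DEV_RUNDOWN)
		return -ENXIO;

	pthread_mutex_unlock(&d->ctl_mutex);
	/* slow teardown happens here without the lock held; other paths
	 * see DEV_RUNDOWN and back off */
	pthread_mutex_lock(&d->ctl_mutex);

	d->state = DEV_UNBOUND;
	return 0;
}

/* Caller: validates the current state and sets DEV_RUNDOWN itself,
 * mirroring how loop_clr_fd()/lo_release() now set Lo_rundown. */
static int clr(struct dev *d)
{
	int err;

	pthread_mutex_lock(&d->ctl_mutex);
	if (d->state != DEV_BOUND) {
		pthread_mutex_unlock(&d->ctl_mutex);
		return -ENXIO;
	}
	d->state = DEV_RUNDOWN;		/* caller flips the state ... */
	err = __teardown(d);		/* ... helper only tears down */
	pthread_mutex_unlock(&d->ctl_mutex);
	return err;
}

int main(void)
{
	struct dev d = { .ctl_mutex = PTHREAD_MUTEX_INITIALIZER,
			 .state = DEV_BOUND };

	printf("first clr: %d\n", clr(&d));	/* 0 */
	printf("second clr: %d\n", clr(&d));	/* -ENXIO: already unbound */
	return 0;
}
]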
Signed-off-by: Jan Kara <jack@...e.cz>
Signed-off-by: Jens Axboe <axboe@...nel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
drivers/block/loop.c | 52 ++++++++++++++++++++++++++++++---------------------
1 file changed, 31 insertions(+), 21 deletions(-)
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -976,7 +976,7 @@ static int loop_set_fd(struct loop_devic
loop_reread_partitions(lo, bdev);
/* Grab the block_device to prevent its destruction after we
- * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev).
+ * put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev).
*/
bdgrab(bdev);
return 0;
@@ -1026,31 +1026,15 @@ loop_init_xfer(struct loop_device *lo, s
return err;
}
-static int loop_clr_fd(struct loop_device *lo)
+static int __loop_clr_fd(struct loop_device *lo)
{
struct file *filp = lo->lo_backing_file;
gfp_t gfp = lo->old_gfp_mask;
struct block_device *bdev = lo->lo_device;
- if (lo->lo_state != Lo_bound)
+ if (WARN_ON_ONCE(lo->lo_state != Lo_rundown))
return -ENXIO;
- /*
- * If we've explicitly asked to tear down the loop device,
- * and it has an elevated reference count, set it for auto-teardown when
- * the last reference goes away. This stops $!~#$@ udev from
- * preventing teardown because it decided that it needs to run blkid on
- * the loopback device whenever they appear. xfstests is notorious for
- * failing tests because blkid via udev races with a losetup
- * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
- * command to fail with EBUSY.
- */
- if (atomic_read(&lo->lo_refcnt) > 1) {
- lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
- mutex_unlock(&loop_ctl_mutex);
- return 0;
- }
-
if (filp == NULL)
return -EINVAL;
@@ -1058,7 +1042,6 @@ static int loop_clr_fd(struct loop_devic
blk_mq_freeze_queue(lo->lo_queue);
spin_lock_irq(&lo->lo_lock);
- lo->lo_state = Lo_rundown;
lo->lo_backing_file = NULL;
spin_unlock_irq(&lo->lo_lock);
@@ -1111,6 +1094,30 @@ static int loop_clr_fd(struct loop_devic
return 0;
}
+static int loop_clr_fd(struct loop_device *lo)
+{
+ if (lo->lo_state != Lo_bound)
+ return -ENXIO;
+ /*
+ * If we've explicitly asked to tear down the loop device,
+ * and it has an elevated reference count, set it for auto-teardown when
+ * the last reference goes away. This stops $!~#$@ udev from
+ * preventing teardown because it decided that it needs to run blkid on
+ * the loopback device whenever they appear. xfstests is notorious for
+ * failing tests because blkid via udev races with a losetup
+ * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
+ * command to fail with EBUSY.
+ */
+ if (atomic_read(&lo->lo_refcnt) > 1) {
+ lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
+ mutex_unlock(&loop_ctl_mutex);
+ return 0;
+ }
+ lo->lo_state = Lo_rundown;
+
+ return __loop_clr_fd(lo);
+}
+
static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
@@ -1692,11 +1699,14 @@ static void lo_release(struct gendisk *d
goto out_unlock;
if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
+ if (lo->lo_state != Lo_bound)
+ goto out_unlock;
+ lo->lo_state = Lo_rundown;
/*
* In autoclear mode, stop the loop thread
* and remove configuration after last close.
*/
- err = loop_clr_fd(lo);
+ err = __loop_clr_fd(lo);
if (!err)
return;
} else if (lo->lo_state == Lo_bound) {
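[Editorial note: the ioctl path touched above is driven from userspace by
LOOP_CLR_FD, which is what losetup -d issues. A hedged sketch for exercising
it follows; it assumes /dev/loop0 is already bound to a backing file (e.g.
via losetup), typically needs root, and keeps error handling minimal.

#include <fcntl.h>
#include <linux/loop.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/loop0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/loop0");
		return 1;
	}

	/*
	 * With this patch, the ioctl handler (loop_clr_fd) checks Lo_bound,
	 * handles the elevated-refcount/autoclear case, sets Lo_rundown and
	 * only then calls __loop_clr_fd() for the actual teardown.
	 */
	if (ioctl(fd, LOOP_CLR_FD, 0) < 0)
		perror("LOOP_CLR_FD");

	close(fd);	/* if teardown was deferred via AUTOCLEAR, the last
			 * close is what finally triggers it */
	return 0;
}
]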