Message-Id: <1431087353-11376-10-git-send-email-sergey.senozhatsky@gmail.com>
Date: Fri, 8 May 2015 21:15:52 +0900
From: Sergey Senozhatsky <sergey.senozhatsky@...il.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
	Minchan Kim <minchan@...nel.org>
Cc: Nitin Gupta <ngupta@...are.org>, linux-kernel@...r.kernel.org,
	Sergey Senozhatsky <sergey.senozhatsky.work@...il.com>
Subject: [PATCHv5 09/10] zram: close race by open overriding
From: Minchan Kim <minchan@...nel.org>

ba6b17d68c8e3aa8d ("zram: fix umount-reset_store-mount race condition")
introduced taking bdev->bd_mutex in reset_store() to close the race
between mount and reset. At that time we did not have the dynamic
zram add/remove feature, so that was fine. However, with the dynamic
device feature being introduced, bd_mutex becomes a problem:

CPU 0                                   CPU 1

echo 1 > /sys/block/zram<id>/reset
 -> kernfs->s_active(A)
 -> zram:reset_store->bd_mutex(B)
                                        echo <id> > /sys/class/zram/zram-remove
                                         -> zram:zram_remove: bd_mutex(B)
                                          -> sysfs_remove_group
                                           -> kernfs->s_active(A)

IOW, AB -> BA deadlock.
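
For illustration only (not part of the patch): this is the classic ABBA
pattern. A minimal userspace analogy, with kernfs s_active and
bdev->bd_mutex modeled as plain pthread mutexes (all names hypothetical),
hangs the same way:

#include <pthread.h>
#include <unistd.h>

/* Stand-ins for kernfs s_active (A) and bdev->bd_mutex (B). */
static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

static void *cpu0(void *arg)		/* reset_store path: A, then B */
{
	(void)arg;
	pthread_mutex_lock(&lock_a);
	sleep(1);			/* let CPU 1 grab B meanwhile */
	pthread_mutex_lock(&lock_b);	/* blocks forever: CPU 1 holds B */
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
	return NULL;
}

static void *cpu1(void *arg)		/* zram_remove path: B, then A */
{
	(void)arg;
	pthread_mutex_lock(&lock_b);
	sleep(1);			/* let CPU 0 grab A meanwhile */
	pthread_mutex_lock(&lock_a);	/* blocks forever: CPU 0 holds A */
	pthread_mutex_unlock(&lock_a);
	pthread_mutex_unlock(&lock_b);
	return NULL;
}

int main(void)
{
	pthread_t t0, t1;

	pthread_create(&t0, NULL, cpu0, NULL);
	pthread_create(&t1, NULL, cpu1, NULL);
	pthread_join(t0, NULL);		/* never returns: ABBA deadlock */
	pthread_join(t1, NULL);
	return 0;
}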

The reason we hold bd_mutex in zram_remove is to prevent any incoming
open of /dev/zram[0-9]; otherwise we could remove a zram device that
others already have opened. But that leads to the deadlock above.

To fix the problem, this patch adds an ->open callback (zram_open()) to
zram's block_device_operations: it returns -EBUSY while reset_store()
has claimed the device (zram->claim is set), so any incoming open fails
during that window and we no longer need to hold bd_mutex in
zram_remove.

This patch prepares for the zram-add/remove feature.
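
As an aside, the claim flag follows a common pattern: hold the lock only
long enough to test the openers count and flip the flag, do the long
reset with no locks held, then clear the flag. A minimal userspace
analogy of that scheme (names hypothetical, a pthread mutex standing in
for bd_mutex; the real zram_open() does not take the lock itself because
the block layer already holds bd_mutex around ->open, hence the WARN_ON
in the diff below):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Userspace stand-ins for bdev->bd_mutex, bd_openers and zram->claim. */
static pthread_mutex_t bd_mutex = PTHREAD_MUTEX_INITIALIZER;
static int openers;
static bool claim;

static int dev_open(void)		/* analogue of zram_open() */
{
	int ret = 0;

	pthread_mutex_lock(&bd_mutex);	/* in the kernel, the block layer holds this */
	if (claim)
		ret = -EBUSY;		/* a reset is in flight */
	else
		openers++;
	pthread_mutex_unlock(&bd_mutex);
	return ret;
}

static int dev_reset(void)		/* analogue of reset_store() */
{
	pthread_mutex_lock(&bd_mutex);
	if (openers || claim) {
		pthread_mutex_unlock(&bd_mutex);
		return -EBUSY;
	}
	claim = true;			/* from now on, opens fail */
	pthread_mutex_unlock(&bd_mutex);

	sleep(1);			/* the long reset, done unlocked */

	pthread_mutex_lock(&bd_mutex);
	claim = false;			/* opens succeed again */
	pthread_mutex_unlock(&bd_mutex);
	return 0;
}

int main(void)
{
	printf("reset: %d\n", dev_reset());	/* 0: nobody has it open   */
	printf("open:  %d\n", dev_open());	/* 0: no reset in progress */
	printf("reset: %d\n", dev_reset());	/* -EBUSY: device is open  */
	return 0;
}
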
[sergey.senozhatsky@...il.com: simplify reset_store()]
Signed-off-by: Minchan Kim <minchan@...nel.org>
Acked-by: Sergey Senozhatsky <sergey.senozhatsky@...il.com>
---
drivers/block/zram/zram_drv.c | 53 +++++++++++++++++++++++++++----------------
drivers/block/zram/zram_drv.h | 4 ++++
2 files changed, 38 insertions(+), 19 deletions(-)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 3df4394..b3541df 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1068,45 +1068,60 @@ static ssize_t reset_store(struct device *dev,
 	struct zram *zram;
 	struct block_device *bdev;
 
+	ret = kstrtou16(buf, 10, &do_reset);
+	if (ret)
+		return ret;
+
+	if (!do_reset)
+		return -EINVAL;
+
 	zram = dev_to_zram(dev);
 	bdev = bdget_disk(zram->disk, 0);
-
 	if (!bdev)
 		return -ENOMEM;
 
 	mutex_lock(&bdev->bd_mutex);
-	/* Do not reset an active device! */
-	if (bdev->bd_openers) {
-		ret = -EBUSY;
-		goto out;
+	/* Do not reset an active device or claimed device */
+	if (bdev->bd_openers || zram->claim) {
+		mutex_unlock(&bdev->bd_mutex);
+		bdput(bdev);
+		return -EBUSY;
 	}
 
-	ret = kstrtou16(buf, 10, &do_reset);
-	if (ret)
-		goto out;
-
-	if (!do_reset) {
-		ret = -EINVAL;
-		goto out;
-	}
+	/* From now on, anyone can't open /dev/zram[0-9] */
+	zram->claim = true;
+	mutex_unlock(&bdev->bd_mutex);
 
-	/* Make sure all pending I/O is finished */
+	/* Make sure all the pending I/O are finished */
 	fsync_bdev(bdev);
 	zram_reset_device(zram);
-
-	mutex_unlock(&bdev->bd_mutex);
 	revalidate_disk(zram->disk);
 	bdput(bdev);
 
+	mutex_lock(&bdev->bd_mutex);
+	zram->claim = false;
+	mutex_unlock(&bdev->bd_mutex);
+
 	return len;
+}
+
+static int zram_open(struct block_device *bdev, fmode_t mode)
+{
+	int ret = 0;
+	struct zram *zram;
+
+	WARN_ON(!mutex_is_locked(&bdev->bd_mutex));
+
+	zram = bdev->bd_disk->private_data;
+	/* zram was claimed to reset so open request fails */
+	if (zram->claim)
+		ret = -EBUSY;
 
-out:
-	mutex_unlock(&bdev->bd_mutex);
-	bdput(bdev);
 	return ret;
 }
 
 static const struct block_device_operations zram_devops = {
+	.open = zram_open,
 	.swap_slot_free_notify = zram_slot_free_notify,
 	.rw_page = zram_rw_page,
 	.owner = THIS_MODULE
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 042994e..6dbe2df 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -115,5 +115,9 @@ struct zram {
 	 */
 	u64 disksize;	/* bytes */
 	char compressor[10];
+	/*
+	 * zram is claimed so open request will be failed
+	 */
+	bool claim; /* Protected by bdev->bd_mutex */
 };
 #endif
--
2.4.0