Message-Id: <20240207092756.2087888-1-linan666@huaweicloud.com>
Date: Wed, 7 Feb 2024 17:27:56 +0800
From: linan666@...weicloud.com
To: axboe@...nel.dk
Cc: linux-raid@...r.kernel.org,
song@...nel.org,
linux-block@...r.kernel.org,
linux-kernel@...r.kernel.org,
linan666@...weicloud.com,
yukuai3@...wei.com,
yi.zhang@...wei.com,
houtao1@...wei.com,
yangerkun@...wei.com
Subject: [PATCH] block: fix deadlock between bd_link_disk_holder and partition scan
From: Li Nan <linan122@...wei.com>
The gendisk 'open_mutex' is used to protect opening and closing of block
devices. But bd_link_disk_holder() also takes it to protect the creation
of the symlinks between the holding disk and the slave bdev, and this
double duty introduces a problem.

When bd_link_disk_holder() is called, the driver is usually in the middle
of initialization or reconfiguration and may have suspended io submission.
At this point, any io path that holds 'open_mutex', such as a partition
scan, can deadlock. For example, in raid:

T1                              T2
bdev_open_by_dev
 lock open_mutex [1]
 ...
  efi_partition
  ...
   md_submit_bio
                                md_ioctl
                                 mddev_suspend
                                  -> suspend all io
                                 md_add_new_disk
                                  bind_rdev_to_array
                                   bd_link_disk_holder
                                    try lock open_mutex [2]
   md_handle_request
    -> wait mddev_resume

T1 is scanning partitions while T2 adds a new device to the raid. T1 waits
for T2 to resume the mddev, but T2 waits for the 'open_mutex' held by T1,
so neither can make progress and a deadlock occurs.
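
To make the cycle concrete, below is a small userspace model of the two
threads (a pthreads sketch for illustration only; it is not kernel code,
and names like state_lock, resume_cv and io_suspended are invented for
the model). T2 gives up after a few trylock attempts so that the demo
terminates instead of hanging:

/*
 * Toy model of the deadlock above. T1 takes open_mutex and then
 * "submits io", which must wait while io is suspended. T2 suspends io
 * and then tries to take open_mutex, as bd_link_disk_holder() does.
 * The sleeps only order the events so the demo is reproducible.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t open_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t resume_cv = PTHREAD_COND_INITIALIZER;
static bool io_suspended;

static void *t1_scan_partitions(void *arg)
{
	pthread_mutex_lock(&open_mutex);	/* [1] bdev_open_by_dev */
	sleep(1);				/* let T2 suspend io first */

	pthread_mutex_lock(&state_lock);	/* md_submit_bio */
	while (io_suspended)			/* md_handle_request: */
		pthread_cond_wait(&resume_cv, &state_lock); /* wait resume */
	pthread_mutex_unlock(&state_lock);

	pthread_mutex_unlock(&open_mutex);
	printf("T1: partition scan finished\n");
	return NULL;
}

static void *t2_add_disk(void *arg)
{
	pthread_mutex_lock(&state_lock);	/* mddev_suspend: */
	io_suspended = true;			/* suspend all io */
	pthread_mutex_unlock(&state_lock);

	sleep(2);				/* T1 is blocked by now */

	/*
	 * bd_link_disk_holder: try lock open_mutex [2]. This can never
	 * succeed while T1 holds it, and T1 cannot release it until we
	 * resume io. Give up after a few tries so the demo exits.
	 */
	for (int i = 0; i < 5; i++) {
		if (pthread_mutex_trylock(&open_mutex) == 0) {
			pthread_mutex_unlock(&open_mutex);
			printf("T2: got open_mutex\n");
			goto resume;
		}
		printf("T2: open_mutex busy, io suspended -> deadlock\n");
		sleep(1);
	}
resume:
	pthread_mutex_lock(&state_lock);	/* mddev_resume */
	io_suspended = false;
	pthread_cond_broadcast(&resume_cv);
	pthread_mutex_unlock(&state_lock);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, t1_scan_partitions, NULL);
	pthread_create(&t2, NULL, t2_add_disk, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}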
Fix it by introducing a local mutex 'holder_mutex' to replace 'open_mutex'.
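
In the same toy model, the fix amounts to giving the holder bookkeeping
its own lock, so T2 never touches 'open_mutex' and the wait cycle cannot
form (a sketch under the same assumptions as above; in the real patch
this is the new 'holder_mutex' in block/holder.c, and t2_link_holder()
is an invented name):

static pthread_mutex_t holder_mutex = PTHREAD_MUTEX_INITIALIZER;

static void t2_link_holder(void)
{
	pthread_mutex_lock(&holder_mutex);	/* instead of open_mutex [2] */
	/* ... create the holder/slave symlinks, add to slave_bdevs ... */
	pthread_mutex_unlock(&holder_mutex);
}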
Signed-off-by: Li Nan <linan122@...wei.com>
---
block/holder.c | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/block/holder.c b/block/holder.c
index 37d18c13d958..5bfb0a674cc7 100644
--- a/block/holder.c
+++ b/block/holder.c
@@ -8,6 +8,8 @@ struct bd_holder_disk {
 	int			refcnt;
 };
 
+static DEFINE_MUTEX(holder_mutex);
+
 static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
 						  struct gendisk *disk)
 {
@@ -80,7 +82,7 @@ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
 	kobject_get(bdev->bd_holder_dir);
 	mutex_unlock(&bdev->bd_disk->open_mutex);
 
-	mutex_lock(&disk->open_mutex);
+	mutex_lock(&holder_mutex);
 	WARN_ON_ONCE(!bdev->bd_holder);
 
 	holder = bd_find_holder_disk(bdev, disk);
@@ -108,7 +110,7 @@ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
 		goto out_del_symlink;
 	list_add(&holder->list, &disk->slave_bdevs);
 
-	mutex_unlock(&disk->open_mutex);
+	mutex_unlock(&holder_mutex);
 	return 0;
 
 out_del_symlink:
@@ -116,7 +118,7 @@ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
 out_free_holder:
 	kfree(holder);
 out_unlock:
-	mutex_unlock(&disk->open_mutex);
+	mutex_unlock(&holder_mutex);
 	if (ret)
 		kobject_put(bdev->bd_holder_dir);
 	return ret;
@@ -140,7 +142,7 @@ void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
 	if (WARN_ON_ONCE(!disk->slave_dir))
 		return;
 
-	mutex_lock(&disk->open_mutex);
+	mutex_lock(&holder_mutex);
 	holder = bd_find_holder_disk(bdev, disk);
 	if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) {
 		del_symlink(disk->slave_dir, bdev_kobj(bdev));
@@ -149,6 +151,6 @@ void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
 		list_del_init(&holder->list);
 		kfree(holder);
 	}
-	mutex_unlock(&disk->open_mutex);
+	mutex_unlock(&holder_mutex);
 }
 EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
--
2.39.2