Message-ID: <9b4c511b-5ab1-7ba2-8ccc-5538c4672eb4@huawei.com>
Date: Thu, 22 Feb 2024 17:26:36 +0800
From: Yu Kuai <yukuai3@...wei.com>
To: <linan666@...weicloud.com>, <axboe@...nel.dk>, <song@...nel.org>,
Christoph Hellwig <hch@....de>
CC: <linux-raid@...r.kernel.org>, <linux-block@...r.kernel.org>,
<linux-kernel@...r.kernel.org>, <yi.zhang@...wei.com>, <houtao1@...wei.com>,
<yangerkun@...wei.com>
Subject: Re: [PATCH v2] block: fix deadlock between bd_link_disk_holder and
partition scan
+CC Christoph
On 2024/02/21 17:01, linan666@...weicloud.com wrote:
> From: Li Nan <linan122@...wei.com>
>
> 'open_mutex' of gendisk is used to protect opening and closing block
> devices. But in bd_link_disk_holder(), it is also used to protect the
> creation of symlinks between the holder disk and the slave bdev, which
> introduces some issues.
>
> When bd_link_disk_holder() is called, the driver is usually in the process
> of initialization or reconfiguration and may have suspended io submission.
> At this time, any io path that holds 'open_mutex', such as a partition
> scan, can deadlock. For example, in raid:
>
> T1                                T2
> bdev_open_by_dev
>  lock open_mutex [1]
>  ...
>   efi_partition
>   ...
>    md_submit_bio
>                                   md_ioctl mddev_suspend
>                                    -> suspend all io
>                                   md_add_new_disk
>                                    bind_rdev_to_array
>                                     bd_link_disk_holder
>                                      try lock open_mutex [2]
>     md_handle_request
>      -> wait mddev_resume
>
> T1 scans partitions while T2 adds a new device to the array. T1 waits
> for T2 to resume mddev, but T2 waits for 'open_mutex' held by T1.
> Deadlock occurs.
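
To make the dependency cycle above concrete, here is a minimal userspace
sketch of the same pattern (hypothetical names, pthreads standing in for
the kernel primitives; not kernel code, just the shape of the hang):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t open_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t resumed = PTHREAD_COND_INITIALIZER;
static bool io_suspended;

/* T1: bdev_open_by_dev -> efi_partition -> md_submit_bio */
static void *t1_scan_partitions(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&open_mutex);	/* [1] */
	pthread_mutex_lock(&state_lock);
	while (io_suspended)			/* -> wait mddev_resume */
		pthread_cond_wait(&resumed, &state_lock);
	pthread_mutex_unlock(&state_lock);
	pthread_mutex_unlock(&open_mutex);
	return NULL;
}

/* T2: md_ioctl -> mddev_suspend -> ... -> bd_link_disk_holder */
static void *t2_add_disk(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&state_lock);
	io_suspended = true;			/* suspend all io */
	pthread_mutex_unlock(&state_lock);
	pthread_mutex_lock(&open_mutex);	/* [2] blocks on T1 forever */
	/* the resume that would wake T1 only happens after this returns */
	pthread_mutex_unlock(&open_mutex);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t2, NULL, t2_add_disk, NULL);
	pthread_create(&t1, NULL, t1_scan_partitions, NULL);
	pthread_join(t1, NULL);	/* with this interleaving, never returns */
	pthread_join(t2, NULL);
	return 0;
}
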
>
> Fix it by introducing a dedicated static mutex, 'blk_holder_mutex', to
> replace 'open_mutex' in the holder bookkeeping.
>
> Fixes: 1b0a2d950ee2 ("md: use new apis to suspend array for ioctls involed array reconfiguration")
> Reported-by: mgperkow@...il.com
> Closes: https://bugzilla.kernel.org/show_bug.cgi?id=218459
> Signed-off-by: Li Nan <linan122@...wei.com>
Reviewed-by: Yu Kuai <yukuai3@...wei.com>
> ---
> v2: add a blk_ prefix to 'holder_mutex'.
>
> block/holder.c | 12 +++++++-----
> 1 file changed, 7 insertions(+), 5 deletions(-)
>
> diff --git a/block/holder.c b/block/holder.c
> index 37d18c13d958..791091a7eac2 100644
> --- a/block/holder.c
> +++ b/block/holder.c
> @@ -8,6 +8,8 @@ struct bd_holder_disk {
> int refcnt;
> };
>
> +static DEFINE_MUTEX(blk_holder_mutex);
> +
> static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
> struct gendisk *disk)
> {
> @@ -80,7 +82,7 @@ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
> kobject_get(bdev->bd_holder_dir);
> mutex_unlock(&bdev->bd_disk->open_mutex);
>
> - mutex_lock(&disk->open_mutex);
> + mutex_lock(&blk_holder_mutex);
> WARN_ON_ONCE(!bdev->bd_holder);
>
> holder = bd_find_holder_disk(bdev, disk);
> @@ -108,7 +110,7 @@ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
> goto out_del_symlink;
> list_add(&holder->list, &disk->slave_bdevs);
>
> - mutex_unlock(&disk->open_mutex);
> + mutex_unlock(&blk_holder_mutex);
> return 0;
>
> out_del_symlink:
> @@ -116,7 +118,7 @@ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
> out_free_holder:
> kfree(holder);
> out_unlock:
> - mutex_unlock(&disk->open_mutex);
> + mutex_unlock(&blk_holder_mutex);
> if (ret)
> kobject_put(bdev->bd_holder_dir);
> return ret;
> @@ -140,7 +142,7 @@ void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
> if (WARN_ON_ONCE(!disk->slave_dir))
> return;
>
> - mutex_lock(&disk->open_mutex);
> + mutex_lock(&blk_holder_mutex);
> holder = bd_find_holder_disk(bdev, disk);
> if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) {
> del_symlink(disk->slave_dir, bdev_kobj(bdev));
> @@ -149,6 +151,6 @@ void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
> list_del_init(&holder->list);
> kfree(holder);
> }
> - mutex_unlock(&disk->open_mutex);
> + mutex_unlock(&blk_holder_mutex);
> }
> EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
>
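
For reference, the caller side that this change decouples from
'open_mutex' is roughly md's bind_rdev_to_array()/unbind_rdev_from_array()
(a simplified sketch; 'bind_member'/'unbind_member' are hypothetical
wrappers, with error handling and md's own locking trimmed):

/* Sketch: what a stacking driver does when a member disk joins/leaves. */
static int bind_member(struct mddev *mddev, struct md_rdev *rdev)
{
	/*
	 * Creates the holders/slaves symlinks. With this patch the
	 * bookkeeping takes blk_holder_mutex instead of the holder
	 * gendisk's open_mutex, so it no longer contends with a
	 * partition scan that holds open_mutex while io is suspended.
	 */
	return bd_link_disk_holder(rdev->bdev, mddev->gendisk);
}

static void unbind_member(struct mddev *mddev, struct md_rdev *rdev)
{
	bd_unlink_disk_holder(rdev->bdev, mddev->gendisk);
}
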