Message-ID: <CADAEsF-9dfpUijt-Ve+aE4S8KuymE=k-HWaG-Lvqxt73KAs4nQ@mail.gmail.com>
Date: Tue, 3 Feb 2015 22:05:31 +0800
From: Ganesh Mahendran <opensource.ganesh@...il.com>
To: Sergey Senozhatsky <sergey.senozhatsky@...il.com>
Cc: Minchan Kim <minchan@...nel.org>,
	Andrew Morton <akpm@...ux-foundation.org>,
	Jerome Marchand <jmarchan@...hat.com>,
	linux-kernel <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH] zram: fix umount-reset_store-mount race condition
Hello, Sergey
2015-02-02 22:08 GMT+08:00 Sergey Senozhatsky <sergey.senozhatsky@...il.com>:
> Ganesh Mahendran was the first one who proposed to use bdev->bd_mutex
> to avoid ->bd_holders race condition:
>
> CPU0                                    CPU1
> umount /* zram->init_done is true */
> reset_store()
> bdev->bd_holders == 0                   mount
> ...                                     zram_make_request()
> zram_reset_device()
>
> However, his solution required a considerable amount of code movement,
> which we can avoid.
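
(To spell out why bd_mutex closes this window, here is a minimal sketch --
simplified pseudo-kernel code, not the actual reset_store()/blkdev_get()
bodies. As far as I can see, mount ends up in blkdev_get(), which bumps
bd_holders under the same bdev->bd_mutex, so it can no longer slip in
between the bd_holders check and zram_reset_device().)

	/* Illustrative sketch only; error paths and details omitted. */

	/* CPU0: reset_store() */
	mutex_lock(&bdev->bd_mutex);	/* serialize against blkdev_get() */
	if (bdev->bd_holders) {		/* already mounted/claimed -> bail */
		mutex_unlock(&bdev->bd_mutex);
		return -EBUSY;
	}
	fsync_bdev(bdev);		/* flush pending I/O */
	zram_reset_device(zram);	/* no new holder can appear here */
	set_capacity(zram->disk, 0);
	mutex_unlock(&bdev->bd_mutex);

	/* CPU1: mount -> blkdev_get() */
	mutex_lock(&bdev->bd_mutex);	/* blocks until the reset finishes */
	bdev->bd_holders++;		/* claim is taken under the same mutex */
	mutex_unlock(&bdev->bd_mutex);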
>
> Apart from using bdev->bd_mutex in reset_store(), this patch also
> simplifies zram_reset_device().
>
> zram_reset_device() has a bool parameter, reset_capacity, which tells it
> whether the disk capacity and the disk itself should be reset. There are
> two zram_reset_device() callers:
> -- zram_exit() passes reset_capacity=false
> -- reset_store() passes reset_capacity=true
>
> So we can move the reset_capacity-sensitive work out of zram_reset_device()
> and perform it unconditionally in reset_store(). This also lets us drop the
> reset_capacity parameter from zram_reset_device() and pass only the zram
> pointer.
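
(For clarity, a rough sketch of the two call paths after this change, as I
read the diff below -- not the literal code:)

	/* reset via sysfs: capacity work now done by the caller */
	reset_store():
		mutex_lock(&bdev->bd_mutex);
		/* return -EBUSY if bdev->bd_holders != 0 */
		fsync_bdev(bdev);
		zram_reset_device(zram);	/* free meta, clear stats, disksize = 0 */
		set_capacity(zram->disk, 0);	/* capacity work moved here */
		mutex_unlock(&bdev->bd_mutex);
		revalidate_disk(zram->disk);	/* outside the locks */

	/* module unload: disk is already released, no capacity work needed */
	zram_exit():
		destroy_device(zram);
		zram_reset_device(zram);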
>
> Reported-by: Ganesh Mahendran <opensource.ganesh@...il.com>
> Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@...il.com>
> ---
> drivers/block/zram/zram_drv.c | 23 +++++++++--------------
> 1 file changed, 9 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
> index aa5a4c5..a32069f 100644
> --- a/drivers/block/zram/zram_drv.c
> +++ b/drivers/block/zram/zram_drv.c
> @@ -717,7 +717,7 @@ static void zram_bio_discard(struct zram *zram, u32 index,
>  	}
>  }
>
> -static void zram_reset_device(struct zram *zram, bool reset_capacity)
> +static void zram_reset_device(struct zram *zram)
>  {
>  	down_write(&zram->init_lock);
>
> @@ -736,18 +736,7 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
>  	memset(&zram->stats, 0, sizeof(zram->stats));
>
>  	zram->disksize = 0;
> -	if (reset_capacity)
> -		set_capacity(zram->disk, 0);
Sorry for the late response.

How about keeping this here, protected by zram->init_lock?

	set_capacity(zram->disk, 0);
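
Something like this is what I mean (just a sketch, not a tested patch;
revalidate_disk() would still be done by the caller outside init_lock, as
the removed comment explains, to avoid the lockdep splat):

	static void zram_reset_device(struct zram *zram)
	{
		down_write(&zram->init_lock);
		/* ... free meta, reset stats (unchanged) ... */
		zram->disksize = 0;
		set_capacity(zram->disk, 0);	/* capacity change stays under init_lock */
		up_write(&zram->init_lock);
	}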
> -
>  	up_write(&zram->init_lock);
> -
> -	/*
> -	 * Revalidate disk out of the init_lock to avoid lockdep splat.
> -	 * It's okay because disk's capacity is protected by init_lock
> -	 * so that revalidate_disk always sees up-to-date capacity.
> -	 */
> -	if (reset_capacity)
> -		revalidate_disk(zram->disk);
>  }
>
>  static ssize_t disksize_store(struct device *dev,
> @@ -820,6 +809,7 @@ static ssize_t reset_store(struct device *dev,
>  	if (!bdev)
>  		return -ENOMEM;
>
> +	mutex_lock(&bdev->bd_mutex);
>  	/* Do not reset an active device! */
>  	if (bdev->bd_holders) {
>  		ret = -EBUSY;
> @@ -837,12 +827,17 @@ static ssize_t reset_store(struct device *dev,
>
>  	/* Make sure all pending I/O is finished */
>  	fsync_bdev(bdev);
> +	zram_reset_device(zram);
> +	set_capacity(zram->disk, 0);
> +
> +	mutex_unlock(&bdev->bd_mutex);
> +	revalidate_disk(zram->disk);
>  	bdput(bdev);
>
> -	zram_reset_device(zram, true);
>  	return len;
>
>  out:
> +	mutex_unlock(&bdev->bd_mutex);
>  	bdput(bdev);
>  	return ret;
>  }
> @@ -1188,7 +1183,7 @@ static void __exit zram_exit(void)
>  		 * Shouldn't access zram->disk after destroy_device
>  		 * because destroy_device already released zram->disk.
>  		 */
> -		zram_reset_device(zram, false);
> +		zram_reset_device(zram);
>  	}
>
>  	unregister_blkdev(zram_major, "zram");
> --
> 2.3.0.rc2
>