Message-ID: <CY4PR04MB37511C151D0F37DC62BB4CE6E7350@CY4PR04MB3751.namprd04.prod.outlook.com>
Date: Mon, 28 Sep 2020 10:11:57 +0000
From: Damien Le Moal <Damien.LeMoal@....com>
To: Kanchan Joshi <joshi.k@...sung.com>,
"axboe@...nel.dk" <axboe@...nel.dk>
CC: "linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"linux-block@...r.kernel.org" <linux-block@...r.kernel.org>,
"stable@...r.kernel.org" <stable@...r.kernel.org>,
"selvakuma.s1@...sung.com" <selvakuma.s1@...sung.com>,
"nj.shetty@...sung.com" <nj.shetty@...sung.com>,
"javier.gonz@...sung.com" <javier.gonz@...sung.com>
Subject: Re: [PATCH v2 1/1] null_blk: synchronization fix for zoned device
On 2020/09/28 18:59, Kanchan Joshi wrote:
> Parallel write, read, and zone-management operations accessing or
> altering the zone state and write pointer may race with each other.
> Avoid this by using a new spinlock for the zoned device.
> The same lock also prevents concurrent zone appends to a zone from
> returning the same write pointer.
>
> Fixes: e0489ed5daeb ("null_blk: Support REQ_OP_ZONE_APPEND")
> Signed-off-by: Kanchan Joshi <joshi.k@...sung.com>
> ---
> drivers/block/null_blk.h | 1 +
> drivers/block/null_blk_zoned.c | 22 ++++++++++++++++++----
> 2 files changed, 19 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
> index daed4a9c3436..28099be50395 100644
> --- a/drivers/block/null_blk.h
> +++ b/drivers/block/null_blk.h
> @@ -44,6 +44,7 @@ struct nullb_device {
> unsigned int nr_zones;
> struct blk_zone *zones;
> sector_t zone_size_sects;
> + spinlock_t zone_lock;
>
> unsigned long size; /* device size in MB */
> unsigned long completion_nsec; /* time in ns to complete a request */
> diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
> index 3d25c9ad2383..e8d8b13aaa5a 100644
> --- a/drivers/block/null_blk_zoned.c
> +++ b/drivers/block/null_blk_zoned.c
> @@ -45,6 +45,7 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
> if (!dev->zones)
> return -ENOMEM;
>
> + spin_lock_init(&dev->zone_lock);
> if (dev->zone_nr_conv >= dev->nr_zones) {
> dev->zone_nr_conv = dev->nr_zones - 1;
> pr_info("changed the number of conventional zones to %u",
> @@ -131,8 +132,11 @@ int null_report_zones(struct gendisk *disk, sector_t sector,
> * So use a local copy to avoid corruption of the device zone
> * array.
> */
> + spin_lock_irq(&dev->zone_lock);
> memcpy(&zone, &dev->zones[first_zone + i],
> sizeof(struct blk_zone));
> + spin_unlock_irq(&dev->zone_lock);
> +
> error = cb(&zone, i, data);
> if (error)
> return error;
> @@ -277,18 +281,28 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
> blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
> sector_t sector, sector_t nr_sectors)
> {
> + blk_status_t sts;
> + struct nullb_device *dev = cmd->nq->dev;
> +
> + spin_lock_irq(&dev->zone_lock);
> switch (op) {
> case REQ_OP_WRITE:
> - return null_zone_write(cmd, sector, nr_sectors, false);
> + sts = null_zone_write(cmd, sector, nr_sectors, false);
> + break;
> case REQ_OP_ZONE_APPEND:
> - return null_zone_write(cmd, sector, nr_sectors, true);
> + sts = null_zone_write(cmd, sector, nr_sectors, true);
> + break;
> case REQ_OP_ZONE_RESET:
> case REQ_OP_ZONE_RESET_ALL:
> case REQ_OP_ZONE_OPEN:
> case REQ_OP_ZONE_CLOSE:
> case REQ_OP_ZONE_FINISH:
> - return null_zone_mgmt(cmd, op, sector);
> + sts = null_zone_mgmt(cmd, op, sector);
> + break;
> default:
> - return null_process_cmd(cmd, op, sector, nr_sectors);
> + sts = null_process_cmd(cmd, op, sector, nr_sectors);
> }
> + spin_unlock_irq(&dev->zone_lock);
> +
> + return sts;
> }
>
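For reference, the serialization added here boils down to the pattern
sketched below. This is only an illustration of the approach, not the
null_blk code itself; the fake_dev/fake_zone/fake_zone_append names are
made up for the example. Every reader and updater of the emulated zone
state takes the same spinlock with interrupts disabled, so two zone
appends issued concurrently against the same zone can no longer both
observe the same write pointer.

/*
 * Minimal sketch (illustrative only) of the locking pattern: all accesses
 * to the emulated zone state are serialized by a single spinlock taken
 * with IRQs disabled.
 */
#include <linux/spinlock.h>
#include <linux/types.h>

struct fake_zone {
	sector_t start;		/* first sector of the zone */
	sector_t len;		/* zone length in sectors */
	sector_t wp;		/* current write pointer */
};

struct fake_dev {
	spinlock_t zone_lock;	/* protects zones[] and every zone's wp */
	struct fake_zone *zones;
};

/*
 * Emulate a zone append: reserve nr_sectors at the current write pointer
 * and report where the data landed. Returns false if the append would
 * cross the end of the zone.
 */
static bool fake_zone_append(struct fake_dev *dev, unsigned int zno,
			     sector_t nr_sectors, sector_t *append_sector)
{
	struct fake_zone *zone = &dev->zones[zno];
	bool ok = true;

	spin_lock_irq(&dev->zone_lock);
	if (zone->wp + nr_sectors > zone->start + zone->len) {
		ok = false;
	} else {
		*append_sector = zone->wp;	/* position reported to the caller */
		zone->wp += nr_sectors;		/* advance before dropping the lock */
	}
	spin_unlock_irq(&dev->zone_lock);

	return ok;
}

The patch applies the same idea in null_process_zoned_cmd() by taking
dev->zone_lock around null_zone_write() and null_zone_mgmt(), and in
null_report_zones() around the memcpy() of each zone.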
Looks good.
Reviewed-by: Damien Le Moal <damien.lemoal@....com>
--
Damien Le Moal
Western Digital Research