Message-Id: <20200928095549.184510-2-joshi.k@samsung.com>
Date: Mon, 28 Sep 2020 15:25:49 +0530
From: Kanchan Joshi <joshi.k@...sung.com>
To: axboe@...nel.dk, Damien.LeMoal@....com
Cc: linux-kernel@...r.kernel.org, linux-block@...r.kernel.org,
stable@...r.kernel.org, selvakuma.s1@...sung.com,
nj.shetty@...sung.com, javier.gonz@...sung.com,
Kanchan Joshi <joshi.k@...sung.com>
Subject: [PATCH v2 1/1] null_blk: synchronization fix for zoned device

Parallel write, read, and zone-mgmt operations that access or alter the
zone state and write pointer may race with one another. Avoid this by
serializing them with a new spinlock in the zoned device.

This lock also prevents concurrent zone appends on a zone from
returning the same write pointer.
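
To make the race concrete, here is a minimal userspace sketch of two
concurrent zone appends (pthreads and a mutex stand in for the driver's
spin_lock_irq on dev->zone_lock; the names are illustrative, not driver
symbols). If the locking below is removed, both threads can observe the
same write pointer and report the same start sector, which is exactly
the problem described above:

/*
 * Illustrative only: a userspace analogue of the zone-append path.
 * Each append must atomically read the zone write pointer, use it as
 * the start sector of the I/O, and advance it past the appended data.
 */
#include <pthread.h>
#include <stdio.h>

#define NR_APPENDS	4
#define APPEND_SECTORS	8

struct zone {
	unsigned long long wp;	/* zone write pointer, in sectors */
	pthread_mutex_t lock;	/* stands in for dev->zone_lock */
};

static struct zone z = { .wp = 0, .lock = PTHREAD_MUTEX_INITIALIZER };

static void *zone_append(void *arg)
{
	unsigned long long start;

	pthread_mutex_lock(&z.lock);
	start = z.wp;			/* sector reported to the caller */
	z.wp += APPEND_SECTORS;		/* advance the write pointer */
	pthread_mutex_unlock(&z.lock);

	printf("append %ld wrote at sector %llu\n", (long)arg, start);
	return NULL;
}

int main(void)
{
	pthread_t t[NR_APPENDS];
	long i;

	for (i = 0; i < NR_APPENDS; i++)
		pthread_create(&t[i], NULL, zone_append, (void *)i);
	for (i = 0; i < NR_APPENDS; i++)
		pthread_join(t[i], NULL);
	return 0;
}

A single device-wide lock is coarse, but null_blk is a test driver, so
serializing all zoned operations keeps the fix simple.
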
Fixes: e0489ed5daeb ("null_blk: Support REQ_OP_ZONE_APPEND")
Signed-off-by: Kanchan Joshi <joshi.k@...sung.com>
---
 drivers/block/null_blk.h       |  1 +
 drivers/block/null_blk_zoned.c | 22 ++++++++++++++++++----
 2 files changed, 19 insertions(+), 4 deletions(-)

diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index daed4a9c3436..28099be50395 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -44,6 +44,7 @@ struct nullb_device {
 	unsigned int nr_zones;
 	struct blk_zone *zones;
 	sector_t zone_size_sects;
+	spinlock_t zone_lock;
 
 	unsigned long size; /* device size in MB */
 	unsigned long completion_nsec; /* time in ns to complete a request */
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index 3d25c9ad2383..e8d8b13aaa5a 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -45,6 +45,7 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
 	if (!dev->zones)
 		return -ENOMEM;
 
+	spin_lock_init(&dev->zone_lock);
 	if (dev->zone_nr_conv >= dev->nr_zones) {
 		dev->zone_nr_conv = dev->nr_zones - 1;
 		pr_info("changed the number of conventional zones to %u",
@@ -131,8 +132,11 @@ int null_report_zones(struct gendisk *disk, sector_t sector,
 		 * So use a local copy to avoid corruption of the device zone
 		 * array.
 		 */
+		spin_lock_irq(&dev->zone_lock);
 		memcpy(&zone, &dev->zones[first_zone + i],
 		       sizeof(struct blk_zone));
+		spin_unlock_irq(&dev->zone_lock);
+
 		error = cb(&zone, i, data);
 		if (error)
 			return error;
@@ -277,18 +281,28 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
 blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
 				    sector_t sector, sector_t nr_sectors)
 {
+	blk_status_t sts;
+	struct nullb_device *dev = cmd->nq->dev;
+
+	spin_lock_irq(&dev->zone_lock);
 	switch (op) {
 	case REQ_OP_WRITE:
-		return null_zone_write(cmd, sector, nr_sectors, false);
+		sts = null_zone_write(cmd, sector, nr_sectors, false);
+		break;
 	case REQ_OP_ZONE_APPEND:
-		return null_zone_write(cmd, sector, nr_sectors, true);
+		sts = null_zone_write(cmd, sector, nr_sectors, true);
+		break;
 	case REQ_OP_ZONE_RESET:
 	case REQ_OP_ZONE_RESET_ALL:
 	case REQ_OP_ZONE_OPEN:
 	case REQ_OP_ZONE_CLOSE:
 	case REQ_OP_ZONE_FINISH:
-		return null_zone_mgmt(cmd, op, sector);
+		sts = null_zone_mgmt(cmd, op, sector);
+		break;
 	default:
-		return null_process_cmd(cmd, op, sector, nr_sectors);
+		sts = null_process_cmd(cmd, op, sector, nr_sectors);
 	}
+	spin_unlock_irq(&dev->zone_lock);
+
+	return sts;
 }
--
2.25.1