Message-ID: <20221005031701.79077-3-kch@nvidia.com>
Date: Tue, 4 Oct 2022 20:16:57 -0700
From: Chaitanya Kulkarni <kch@...dia.com>
To: <linux-block@...r.kernel.org>, <linux-kernel@...r.kernel.org>
CC: <axboe@...nel.dk>, <kch@...dia.com>,
<damien.lemoal@...nsource.wdc.com>, <johannes.thumshirn@....com>,
<bvanassche@....org>, <ming.lei@...hat.com>,
<shinichiro.kawasaki@....com>, <vincent.fu@...sung.com>,
<yukuai3@...wei.com>
Subject: [PATCH 2/6] null_blk: allow write zeroes on membacked

Add helper functions to handle the REQ_OP_WRITE_ZEROES operation when
null_blk is configured in memory-backed mode, along with a configfs
attribute to enable it.

Since write-zeroes is a non-trivial I/O operation, this support is
needed so that a blktests test case can exercise this I/O path from the
application down through the block layer.

Signed-off-by: Chaitanya Kulkarni <kch@...dia.com>
---
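Note for reviewers, not intended for the commit message: once a
memory-backed nullb device is configured with the new write_zeroes
attribute enabled, the path added here can be driven from user space
with the BLKZEROOUT ioctl, which the block layer maps to
REQ_OP_WRITE_ZEROES when the queue advertises write-zeroes support
(and emulates with regular zero writes otherwise). A minimal sketch,
assuming the device shows up as /dev/nullb0:

/*
 * Not part of this patch: user-space sketch that zeroes the first
 * 1 MiB of an assumed /dev/nullb0 via BLKZEROOUT.
 */
#include <fcntl.h>
#include <linux/fs.h>	/* BLKZEROOUT */
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	/* {offset, length} in bytes, both logical-block-size aligned */
	uint64_t range[2] = { 0, 1024 * 1024 };
	int fd = open("/dev/nullb0", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BLKZEROOUT, range) < 0) {
		perror("BLKZEROOUT");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}

blkdiscard --zeroout from util-linux issues the same ioctl, so a
blktests case can also shell out to that instead.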
drivers/block/null_blk/main.c | 46 ++++++++++++++++++++++++++++++++++-
1 file changed, 45 insertions(+), 1 deletion(-)

diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index fc3e883f7b84..2d592b4eb815 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -420,6 +420,7 @@ NULLB_DEVICE_ATTR(blocking, bool, NULL);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool, NULL);
NULLB_DEVICE_ATTR(memory_backed, bool, NULL);
NULLB_DEVICE_ATTR(discard, bool, NULL);
+NULLB_DEVICE_ATTR(write_zeroes, bool, NULL);
NULLB_DEVICE_ATTR(mbps, uint, NULL);
NULLB_DEVICE_ATTR(cache_size, ulong, NULL);
NULLB_DEVICE_ATTR(zoned, bool, NULL);
@@ -544,6 +545,7 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_power,
&nullb_device_attr_memory_backed,
&nullb_device_attr_discard,
+ &nullb_device_attr_write_zeroes,
&nullb_device_attr_mbps,
&nullb_device_attr_cache_size,
&nullb_device_attr_badblocks,
@@ -618,7 +620,7 @@ static ssize_t memb_group_features_show(struct config_item *item, char *page)
"poll_queues,power,queue_mode,shared_tag_bitmap,size,"
"submit_queues,use_per_node_hctx,virt_boundary,zoned,"
"zone_capacity,zone_max_active,zone_max_open,"
- "zone_nr_conv,zone_size\n");
+ "zone_nr_conv,zone_size,write_zeroes\n");
}

CONFIGFS_ATTR_RO(memb_group_, features);
@@ -875,6 +877,24 @@ static void null_free_sector(struct nullb *nullb, sector_t sector,
}
}

+static void null_zero_sector(struct nullb_device *d, sector_t sect,
+ sector_t nr_sects, bool cache)
+{
+ struct radix_tree_root *root = cache ? &d->cache : &d->data;
+ struct nullb_page *t_page;
+ unsigned int offset;
+ void *dest;
+
+ t_page = radix_tree_lookup(root, sect >> PAGE_SECTORS_SHIFT);
+ if (!t_page)
+ return;
+
+ offset = (sect & SECTOR_MASK) << SECTOR_SHIFT;
+ dest = kmap_atomic(t_page->page);
+ memset(dest + offset, 0, SECTOR_SIZE * nr_sects);
+ kunmap_atomic(dest);
+}
+
static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
struct nullb_page *t_page, bool is_cache)
{
@@ -1191,6 +1211,27 @@ blk_status_t null_handle_discard(struct nullb_device *dev,
return BLK_STS_OK;
}

+static blk_status_t null_handle_write_zeroes(struct nullb_device *dev,
+ sector_t sector, sector_t nr_sectors)
+{
+ unsigned int bytes_left = nr_sectors << SECTOR_SHIFT;
+ struct nullb *nullb = dev->nullb;
+ size_t curr_bytes;
+
+ spin_lock_irq(&nullb->lock);
+ while (bytes_left > 0) {
+ curr_bytes = min_t(size_t, bytes_left, nullb->dev->blocksize);
+ nr_sectors = curr_bytes >> SECTOR_SHIFT;
+ null_zero_sector(nullb->dev, sector, nr_sectors, false);
+ if (null_cache_active(nullb))
+ null_zero_sector(nullb->dev, sector, nr_sectors, true);
+ sector += nr_sectors;
+ bytes_left -= curr_bytes;
+ }
+ spin_unlock_irq(&nullb->lock);
+ return BLK_STS_OK;
+}
+
static int null_handle_flush(struct nullb *nullb)
{
int err;
@@ -1357,6 +1398,9 @@ static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
if (op == REQ_OP_DISCARD)
return null_handle_discard(dev, sector, nr_sectors);

+ if (op == REQ_OP_WRITE_ZEROES)
+ return null_handle_write_zeroes(dev, sector, nr_sectors);
+
if (dev->queue_mode == NULL_Q_BIO)
err = null_handle_bio(cmd);
else
--
2.29.0