Message-Id: <20230503183821.1473305-2-john.g.garry@oracle.com>
Date: Wed, 3 May 2023 18:38:06 +0000
From: John Garry <john.g.garry@...cle.com>
To: axboe@...nel.dk, kbusch@...nel.org, hch@....de, sagi@...mberg.me,
martin.petersen@...cle.com, djwong@...nel.org,
viro@...iv.linux.org.uk, brauner@...nel.org, dchinner@...hat.com,
jejb@...ux.ibm.com
Cc: linux-block@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-nvme@...ts.infradead.org, linux-scsi@...r.kernel.org,
linux-xfs@...r.kernel.org, linux-fsdevel@...r.kernel.org,
linux-security-module@...r.kernel.org, paul@...l-moore.com,
jmorris@...ei.org, serge@...lyn.com,
Himanshu Madhani <himanshu.madhani@...cle.com>,
John Garry <john.g.garry@...cle.com>
Subject: [PATCH RFC 01/16] block: Add atomic write operations to request_queue limits
From: Himanshu Madhani <himanshu.madhani@...cle.com>
Add the following limits:
- atomic_write_boundary
- atomic_write_max_bytes
- atomic_write_unit_max
- atomic_write_unit_min
Signed-off-by: Himanshu Madhani <himanshu.madhani@...cle.com>
Signed-off-by: John Garry <john.g.garry@...cle.com>
---
 Documentation/ABI/stable/sysfs-block | 42 +++++++++++++++++++++
 block/blk-settings.c                 | 56 ++++++++++++++++++++++++++++
 block/blk-sysfs.c                    | 33 ++++++++++++++++
 include/linux/blkdev.h               | 23 ++++++++++++
 4 files changed, 154 insertions(+)
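
For context, and not as part of the patch itself, a hypothetical driver that
knows its device's atomic write capabilities could program these limits with
the helpers added below. The probe helper name and the limit values here are
invented purely for illustration; note that the unit_min/unit_max setters
take sector counts, while max_bytes and boundary take bytes.

  /* Hypothetical driver code -- values are illustrative only. */
  #include <linux/blkdev.h>

  static void foo_setup_atomic_writes(struct request_queue *q)
  {
          /* unit_min/unit_max are given in 512-byte sectors */
          blk_queue_atomic_write_unit_min(q, 1);          /* 512 bytes */
          blk_queue_atomic_write_unit_max(q, 64);         /* 32 KiB */
          /* max_bytes and boundary are given in bytes */
          blk_queue_atomic_write_max_bytes(q, 32 * 1024);
          blk_queue_atomic_write_boundary(q, 64 * 1024);
  }
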
diff --git a/Documentation/ABI/stable/sysfs-block b/Documentation/ABI/stable/sysfs-block
index 282de3680367..f3ed9890e03b 100644
--- a/Documentation/ABI/stable/sysfs-block
+++ b/Documentation/ABI/stable/sysfs-block
@@ -21,6 +21,48 @@ Description:
device is offset from the internal allocation unit's
natural alignment.
+What:  /sys/block/<disk>/queue/atomic_write_max_bytes
+Date: May 2023
+Contact: Himanshu Madhani <himanshu.madhani@...cle.com>
+Description:
+ [RO] This parameter specifies the maximum atomic write
+ size reported by the device. An atomic write operation
+ must not exceed this number of bytes.
+
+
+What:  /sys/block/<disk>/queue/atomic_write_unit_min
+Date: May 2023
+Contact: Himanshu Madhani <himanshu.madhani@...cle.com>
+Description:
+ [RO] This parameter specifies the smallest block which can
+ be written atomically with an atomic write operation. All
+ atomic write operations must begin at an
+ atomic_write_unit_min boundary and must be multiples of
+ atomic_write_unit_min. This value must be a power-of-two.
+
+
+What:  /sys/block/<disk>/queue/atomic_write_unit_max
+Date:  May 2023
+Contact: Himanshu Madhani <himanshu.madhani@...cle.com>
+Description:
+ [RO] This parameter defines the largest block which can be
+ written atomically with an atomic write operation. This
+ value must be a multiple of atomic_write_unit_min and must
+ be a power-of-two.
+
+
+What:  /sys/block/<disk>/queue/atomic_write_boundary
+Date: May 2023
+Contact: Himanshu Madhani <himanshu.madhani@...cle.com>
+Description:
+ [RO] A device may need to internally split I/Os which
+ straddle a given logical block address boundary. In that
+ case a single atomic write operation will be processed as
+ one or more sub-operations which each complete atomically.
+ This parameter specifies the size in bytes of the atomic
+ boundary if one is reported by the device. This value must
+ be a power-of-two.
+
What: /sys/block/<disk>/diskseq
Date: February 2021
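
To make the constraints documented above concrete, the sketch below (not part
of the patch; the function and variable names are invented) checks whether a
write of len bytes at byte offset pos satisfies the documented rules, given
values read from the queue/ attributes above:

  #include <stdbool.h>

  /* Illustrative only: can this write complete as a single atomic unit? */
  static bool atomic_write_fits(unsigned long long pos, unsigned int len,
                                unsigned int unit_min, unsigned int unit_max,
                                unsigned int max_bytes, unsigned int boundary)
  {
          if (!len || len > max_bytes || len > unit_max)
                  return false;
          /* must start on and be a multiple of atomic_write_unit_min */
          if (pos % unit_min || len % unit_min)
                  return false;
          /* a single atomic unit should not straddle a reported boundary */
          if (boundary && pos / boundary != (pos + len - 1) / boundary)
                  return false;
          return true;
  }
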
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 896b4654ab00..e21731715a12 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -59,6 +59,9 @@ void blk_set_default_limits(struct queue_limits *lim)
lim->zoned = BLK_ZONED_NONE;
lim->zone_write_granularity = 0;
lim->dma_alignment = 511;
+ lim->atomic_write_unit_min = lim->atomic_write_unit_max = 1;
+ lim->atomic_write_max_bytes = 512;
+ lim->atomic_write_boundary = 0;
}
/**
@@ -183,6 +186,59 @@ void blk_queue_max_discard_sectors(struct request_queue *q,
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);
+/**
+ * blk_queue_atomic_write_max_bytes - set max bytes supported by
+ * the device for atomic write operations.
+ * @q: the request queue for the device
+ * @size: maximum bytes supported
+ */
+void blk_queue_atomic_write_max_bytes(struct request_queue *q,
+ unsigned int size)
+{
+ q->limits.atomic_write_max_bytes = size;
+}
+EXPORT_SYMBOL(blk_queue_atomic_write_max_bytes);
+
+/**
+ * blk_queue_atomic_write_boundary - Boundary in the device's logical block
+ * address space which an atomic write should not cross.
+ * @q: the request queue for the device
+ * @size: size in bytes. Must be a power-of-two.
+ */
+void blk_queue_atomic_write_boundary(struct request_queue *q,
+ unsigned int size)
+{
+ q->limits.atomic_write_boundary = size;
+}
+EXPORT_SYMBOL(blk_queue_atomic_write_boundary);
+
+/**
+ * blk_queue_atomic_write_unit_min - smallest unit that can be written
+ * atomically to the device.
+ * @q: the request queue for the device
+ * @sectors: must be a power-of-two.
+ */
+void blk_queue_atomic_write_unit_min(struct request_queue *q,
+ unsigned int sectors)
+{
+ q->limits.atomic_write_unit_min = sectors;
+}
+EXPORT_SYMBOL(blk_queue_atomic_write_unit_min);
+
+/**
+ * blk_queue_atomic_write_unit_max - largest unit that can be written
+ * atomically to the device.
+ * @q: the request queue for the device
+ * @sectors: must be a power-of-two.
+ */
+void blk_queue_atomic_write_unit_max(struct request_queue *q,
+ unsigned int sectors)
+{
+ struct queue_limits *limits = &q->limits;
+ limits->atomic_write_unit_max = sectors;
+}
+EXPORT_SYMBOL(blk_queue_atomic_write_unit_max);
+
/**
* blk_queue_max_secure_erase_sectors - set max sectors for a secure erase
* @q: the request queue for the device
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index f1fce1c7fa44..1025beff2281 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -132,6 +132,30 @@ static ssize_t queue_max_discard_segments_show(struct request_queue *q,
return queue_var_show(queue_max_discard_segments(q), page);
}
+static ssize_t queue_atomic_write_max_bytes_show(struct request_queue *q,
+ char *page)
+{
+ return queue_var_show(q->limits.atomic_write_max_bytes, page);
+}
+
+static ssize_t queue_atomic_write_boundary_show(struct request_queue *q,
+ char *page)
+{
+ return queue_var_show(q->limits.atomic_write_boundary, page);
+}
+
+static ssize_t queue_atomic_write_unit_min_show(struct request_queue *q,
+ char *page)
+{
+ return queue_var_show(queue_atomic_write_unit_min(q), page);
+}
+
+static ssize_t queue_atomic_write_unit_max_show(struct request_queue *q,
+ char *page)
+{
+ return queue_var_show(queue_atomic_write_unit_max(q), page);
+}
+
static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
return queue_var_show(q->limits.max_integrity_segments, page);
@@ -604,6 +628,11 @@ QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");
+QUEUE_RO_ENTRY(queue_atomic_write_max_bytes, "atomic_write_max_bytes");
+QUEUE_RO_ENTRY(queue_atomic_write_boundary, "atomic_write_boundary");
+QUEUE_RO_ENTRY(queue_atomic_write_unit_max, "atomic_write_unit_max");
+QUEUE_RO_ENTRY(queue_atomic_write_unit_min, "atomic_write_unit_min");
+
QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
@@ -661,6 +690,10 @@ static struct attribute *queue_attrs[] = {
&queue_discard_max_entry.attr,
&queue_discard_max_hw_entry.attr,
&queue_discard_zeroes_data_entry.attr,
+ &queue_atomic_write_max_bytes_entry.attr,
+ &queue_atomic_write_boundary_entry.attr,
+ &queue_atomic_write_unit_min_entry.attr,
+ &queue_atomic_write_unit_max_entry.attr,
&queue_write_same_max_entry.attr,
&queue_write_zeroes_max_entry.attr,
&queue_zone_append_max_entry.attr,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 941304f17492..6b6f2992338c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -304,6 +304,11 @@ struct queue_limits {
unsigned int discard_alignment;
unsigned int zone_write_granularity;
+ unsigned int atomic_write_boundary;
+ unsigned int atomic_write_max_bytes;
+ unsigned int atomic_write_unit_min;
+ unsigned int atomic_write_unit_max;
+
unsigned short max_segments;
unsigned short max_integrity_segments;
unsigned short max_discard_segments;
@@ -929,6 +934,14 @@ void blk_queue_zone_write_granularity(struct request_queue *q,
unsigned int size);
extern void blk_queue_alignment_offset(struct request_queue *q,
unsigned int alignment);
+extern void blk_queue_atomic_write_max_bytes(struct request_queue *q,
+ unsigned int size);
+extern void blk_queue_atomic_write_unit_max(struct request_queue *q,
+ unsigned int sectors);
+extern void blk_queue_atomic_write_unit_min(struct request_queue *q,
+ unsigned int sectors);
+extern void blk_queue_atomic_write_boundary(struct request_queue *q,
+ unsigned int size);
void disk_update_readahead(struct gendisk *disk);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
@@ -1331,6 +1344,16 @@ static inline int queue_dma_alignment(const struct request_queue *q)
return q ? q->limits.dma_alignment : 511;
}
+static inline unsigned int queue_atomic_write_unit_max(const struct request_queue *q)
+{
+ return q->limits.atomic_write_unit_max << SECTOR_SHIFT;
+}
+
+static inline unsigned int queue_atomic_write_unit_min(const struct request_queue *q)
+{
+ return q->limits.atomic_write_unit_min << SECTOR_SHIFT;
+}
+
static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
{
return queue_dma_alignment(bdev_get_queue(bdev));
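
Since the new limits are exported read-only through sysfs by the blk-sysfs.c
hunk above, userspace can discover them with plain file reads. A minimal
sketch, with the helper and disk names invented for illustration:

  #include <stdio.h>

  /* Illustrative only: read one of the new queue attributes. */
  static unsigned long read_queue_limit(const char *disk, const char *attr)
  {
          char path[256];
          unsigned long val = 0;
          FILE *f;

          snprintf(path, sizeof(path), "/sys/block/%s/queue/%s", disk, attr);
          f = fopen(path, "r");
          if (!f)
                  return 0;
          if (fscanf(f, "%lu", &val) != 1)
                  val = 0;
          fclose(f);
          return val;
  }

  /* e.g. read_queue_limit("sda", "atomic_write_unit_max") returns bytes */

Note that while blk_queue_atomic_write_unit_min/max() take sector counts, the
corresponding sysfs files report bytes via the queue_atomic_write_unit_min/max()
helpers above.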
--
2.31.1