Message-Id: <20230503183821.1473305-7-john.g.garry@oracle.com>
Date: Wed, 3 May 2023 18:38:11 +0000
From: John Garry <john.g.garry@...cle.com>
To: axboe@...nel.dk, kbusch@...nel.org, hch@....de, sagi@...mberg.me,
martin.petersen@...cle.com, djwong@...nel.org,
viro@...iv.linux.org.uk, brauner@...nel.org, dchinner@...hat.com,
jejb@...ux.ibm.com
Cc: linux-block@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-nvme@...ts.infradead.org, linux-scsi@...r.kernel.org,
linux-xfs@...r.kernel.org, linux-fsdevel@...r.kernel.org,
linux-security-module@...r.kernel.org, paul@...l-moore.com,
jmorris@...ei.org, serge@...lyn.com,
John Garry <john.g.garry@...cle.com>
Subject: [PATCH RFC 06/16] block: Limit atomic writes according to bio and queue limits
We rely on the block layer always being able to send a bio of size
atomic_write_unit_max without it needing to be split due to request
queue or other bio limits. We already know that any bio should have an
alignment of atomic_write_unit_max or lower.
A bio may contain at most min(BIO_MAX_VECS, limits->max_segments) vectors,
and each vector covers at least PAGE_SIZE of data, except that the start
address may not be PAGE-aligned; for that case, subtract 1 to give the max
guaranteed count of pages which we may store in a bio, and limit both
atomic_write_unit_min and atomic_write_unit_max to this value.
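As an illustrative worked example (figures not taken from this patch): with
4 KiB pages, 512-byte sectors and, say, max_segments = 128, we get
min(BIO_MAX_VECS, 128) = 128 segments, so (128 - 1) * (4096 / 512) = 1016
sectors, which rounddown_pow_of_two() reduces to 512 sectors (256 KiB);
atomic_write_unit_min and atomic_write_unit_max are then capped at that
value.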
Signed-off-by: John Garry <john.g.garry@...cle.com>
---
block/blk-settings.c | 23 ++++++++++++++++++++---
1 file changed, 20 insertions(+), 3 deletions(-)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index e21731715a12..f64a2f736cb8 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -212,6 +212,18 @@ void blk_queue_atomic_write_boundary(struct request_queue *q,
}
EXPORT_SYMBOL(blk_queue_atomic_write_boundary);
+static unsigned int blk_queue_max_guaranteed_bio_size(struct queue_limits *limits)
+{
+ unsigned int max_segments = limits->max_segments;
+ unsigned int atomic_write_max_segments =
+ min(BIO_MAX_VECS, max_segments);
+ /* subtract 1 to assume PAGE-misaligned IOV start address */
+ unsigned int size = (atomic_write_max_segments - 1) *
+ (PAGE_SIZE / SECTOR_SIZE);
+
+ return rounddown_pow_of_two(size);
+}
+
/**
* blk_queue_atomic_write_unit_min - smallest unit that can be written
* atomically to the device.
@@ -221,7 +233,10 @@ EXPORT_SYMBOL(blk_queue_atomic_write_boundary);
void blk_queue_atomic_write_unit_min(struct request_queue *q,
unsigned int sectors)
{
- q->limits.atomic_write_unit_min = sectors;
+ struct queue_limits *limits = &q->limits;
+ unsigned int guaranteed = blk_queue_max_guaranteed_bio_size(limits);
+
+ limits->atomic_write_unit_min = min(guaranteed, sectors);
}
EXPORT_SYMBOL(blk_queue_atomic_write_unit_min);
@@ -234,8 +249,10 @@ EXPORT_SYMBOL(blk_queue_atomic_write_unit_min);
void blk_queue_atomic_write_unit_max(struct request_queue *q,
unsigned int sectors)
{
- struct queue_limits *limits = &q->limits;
- limits->atomic_write_unit_max = sectors;
+ struct queue_limits *limits = &q->limits;
+ unsigned int guaranteed = blk_queue_max_guaranteed_bio_size(limits);
+
+ limits->atomic_write_unit_max = min(guaranteed, sectors);
}
EXPORT_SYMBOL(blk_queue_atomic_write_unit_max);
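As a rough usage sketch (not part of this patch; the driver function name and
the sector values are hypothetical), a driver advertising atomic write support
might set these limits while configuring its queue; both helpers take a count
of 512-byte sectors and, with this patch, clamp the value to what a single bio
is guaranteed to hold:

	/* Hypothetical driver setup; values are illustrative only */
	static void foo_set_atomic_write_limits(struct request_queue *q)
	{
		/* both values may be reduced to the guaranteed bio size */
		blk_queue_atomic_write_unit_min(q, 8);	/* 4 KiB */
		blk_queue_atomic_write_unit_max(q, 64);	/* 32 KiB */
	}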
--
2.31.1