[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20240124113841.31824-3-john.g.garry@oracle.com>
Date: Wed, 24 Jan 2024 11:38:28 +0000
From: John Garry <john.g.garry@...cle.com>
To: axboe@...nel.dk, kbusch@...nel.org, hch@....de, sagi@...mberg.me,
jejb@...ux.ibm.com, martin.petersen@...cle.com, djwong@...nel.org,
viro@...iv.linux.org.uk, brauner@...nel.org, dchinner@...hat.com,
jack@...e.cz
Cc: linux-block@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-nvme@...ts.infradead.org, linux-xfs@...r.kernel.org,
linux-fsdevel@...r.kernel.org, tytso@....edu, jbongio@...gle.com,
linux-scsi@...r.kernel.org, ming.lei@...hat.com, ojaswin@...ux.ibm.com,
bvanassche@....org, John Garry <john.g.garry@...cle.com>
Subject: [PATCH v3 02/15] block: Limit atomic writes according to bio and queue limits
We rely on the block layer always being able to send a bio of size
atomic_write_unit_max without being required to split it due to request
queue or other bio limits.
A bio may contain min(BIO_MAX_VECS, limits->max_segments) vectors on the
relevant submission paths for atomic writes, and each vector holds at
least PAGE_SIZE of data, apart from the first and last vectors, which
may be smaller.
Signed-off-by: John Garry <john.g.garry@...cle.com>
---
block/blk-settings.c | 28 ++++++++++++++++++++++++++--
1 file changed, 26 insertions(+), 2 deletions(-)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 11c0361c2313..176f26374abc 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -108,18 +108,42 @@ void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
+
+/*
+ * Returns max guaranteed sectors which we can fit in a bio. For convenience of
+ * users, rounddown_pow_of_two() the return value.
+ *
+ * We always assume that we can fit in at least PAGE_SIZE in a segment, apart
+ * from first and last segments.
+ */
+static unsigned int blk_queue_max_guaranteed_bio_sectors(
+ struct queue_limits *limits,
+ struct request_queue *q)
+{
+ unsigned int max_segments = min(BIO_MAX_VECS, limits->max_segments);
+ unsigned int length;
+
+ length = min(max_segments, 2) * queue_logical_block_size(q);
+ if (max_segments > 2)
+ length += (max_segments - 2) * PAGE_SIZE;
+
+ return rounddown_pow_of_two(length >> SECTOR_SHIFT);
+}
+
static void blk_atomic_writes_update_limits(struct request_queue *q)
{
struct queue_limits *limits = &q->limits;
unsigned int max_hw_sectors =
rounddown_pow_of_two(limits->max_hw_sectors);
+ unsigned int unit_limit = min(max_hw_sectors,
+ blk_queue_max_guaranteed_bio_sectors(limits, q));
limits->atomic_write_max_sectors =
min(limits->atomic_write_hw_max_sectors, max_hw_sectors);
limits->atomic_write_unit_min_sectors =
- min(limits->atomic_write_hw_unit_min_sectors, max_hw_sectors);
+ min(limits->atomic_write_hw_unit_min_sectors, unit_limit);
limits->atomic_write_unit_max_sectors =
- min(limits->atomic_write_hw_unit_max_sectors, max_hw_sectors);
+ min(limits->atomic_write_hw_unit_max_sectors, unit_limit);
}
/**
--
2.31.1
Powered by blists - more mailing lists