[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20210114154723.2495814-8-satyat@google.com>
Date: Thu, 14 Jan 2021 15:47:23 +0000
From: Satya Tangirala <satyat@...gle.com>
To: linux-block@...r.kernel.org, linux-kernel@...r.kernel.org
Cc: Jens Axboe <axboe@...nel.dk>, Eric Biggers <ebiggers@...gle.com>,
Satya Tangirala <satyat@...gle.com>
Subject: [PATCH 7/7] block: compute nsegs more accurately in blk_bio_segment_split()
Previously, we rounded down the number of sectors just before calling
bio_split() in blk_bio_segment_split(). While this ensures that bios
are not split in the middle of a data unit, it makes it possible
for nsegs to be overestimated. This patch calculates nsegs accurately (it
calculates the smallest number of segments required for the aligned number
of sectors in the split bio).
Signed-off-by: Satya Tangirala <satyat@...gle.com>
---
block/blk-merge.c | 97 ++++++++++++++++++++++++++++++++++++++---------
1 file changed, 80 insertions(+), 17 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 45cda45c1066..58428d348661 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -145,17 +145,17 @@ static inline unsigned get_max_io_size(struct request_queue *q,
struct bio *bio)
{
unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector, 0);
- unsigned max_sectors = sectors;
unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
- unsigned start_offset = bio->bi_iter.bi_sector & (pbs - 1);
+ unsigned pbs_aligned_sector =
+ round_down(sectors + bio->bi_iter.bi_sector, pbs);
- max_sectors += start_offset;
- max_sectors &= ~(pbs - 1);
- if (max_sectors > start_offset)
- return max_sectors - start_offset;
+ lbs = max(lbs, blk_crypto_bio_sectors_alignment(bio));
- return sectors & ~(lbs - 1);
+ if (pbs_aligned_sector >= bio->bi_iter.bi_sector + lbs)
+ sectors = pbs_aligned_sector;
+
+ return round_down(sectors, lbs);
}
static inline unsigned get_max_segment_size(const struct request_queue *q,
@@ -174,6 +174,41 @@ static inline unsigned get_max_segment_size(const struct request_queue *q,
(unsigned long)queue_max_segment_size(q));
}
+/**
+ * update_aligned_sectors_and_segs() - Ensures that *@...gned_sectors is aligned
+ * to @bio_sectors_alignment, and that
+ * *@...gned_segs is the value of nsegs
+ * when sectors reached/first exceeded that
+ * value of *@...gned_sectors.
+ *
+ * @nsegs: [in] The current number of segs
+ * @sectors: [in] The current number of sectors
+ * @aligned_segs: [in,out] The number of segments that make up @aligned_sectors
+ * @aligned_sectors: [in,out] The largest number of sectors <= @sectors that is
+ *			aligned to @bio_sectors_alignment
+ * @bio_sectors_alignment: [in] The alignment requirement for the number of
+ * sectors
+ *
+ * Updates *@aligned_sectors to the largest number <= @sectors that is also a
+ * multiple of @bio_sectors_alignment. This is done by updating *@aligned_sectors
+ * whenever @sectors is at least @bio_sectors_alignment more than
+ * *@aligned_sectors, since that means we can increment *@aligned_sectors while
+ * still keeping it aligned to @bio_sectors_alignment and also keeping it <=
+ * @sectors. *@aligned_segs is updated to the value of nsegs when @sectors first
+ * reaches/exceeds any value that causes *@aligned_sectors to be updated.
+ */
+static inline void update_aligned_sectors_and_segs(const unsigned int nsegs,
+ const unsigned int sectors,
+ unsigned int *aligned_segs,
+ unsigned int *aligned_sectors,
+ const unsigned int bio_sectors_alignment)
+{
+ if (sectors - *aligned_sectors < bio_sectors_alignment)
+ return;
+ *aligned_sectors = round_down(sectors, bio_sectors_alignment);
+ *aligned_segs = nsegs;
+}
+
/**
* bvec_split_segs - verify whether or not a bvec should be split in the middle
* @q: [in] request queue associated with the bio associated with @bv
@@ -195,9 +230,12 @@ static inline unsigned get_max_segment_size(const struct request_queue *q,
* the block driver.
*/
static bool bvec_split_segs(const struct request_queue *q,
- const struct bio_vec *bv, unsigned *nsegs,
- unsigned *sectors, unsigned max_segs,
- unsigned max_sectors)
+ const struct bio_vec *bv, unsigned int *nsegs,
+ unsigned int *sectors, unsigned int *aligned_segs,
+ unsigned int *aligned_sectors,
+ unsigned int bio_sectors_alignment,
+ unsigned int max_segs,
+ unsigned int max_sectors)
{
unsigned max_len = (min(max_sectors, UINT_MAX >> 9) - *sectors) << 9;
unsigned len = min(bv->bv_len, max_len);
@@ -211,6 +249,11 @@ static bool bvec_split_segs(const struct request_queue *q,
(*nsegs)++;
total_len += seg_size;
+ update_aligned_sectors_and_segs(*nsegs,
+ *sectors + (total_len >> 9),
+ aligned_segs,
+ aligned_sectors,
+ bio_sectors_alignment);
len -= seg_size;
if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
@@ -258,6 +301,9 @@ static int blk_bio_segment_split(struct request_queue *q,
unsigned nsegs = 0, sectors = 0;
const unsigned max_sectors = get_max_io_size(q, bio);
const unsigned max_segs = queue_max_segments(q);
+ const unsigned int bio_sectors_alignment =
+ blk_crypto_bio_sectors_alignment(bio);
+ unsigned int aligned_segs = 0, aligned_sectors = 0;
bio_for_each_bvec(bv, bio, iter) {
/*
@@ -272,8 +318,14 @@ static int blk_bio_segment_split(struct request_queue *q,
bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
nsegs++;
sectors += bv.bv_len >> 9;
- } else if (bvec_split_segs(q, &bv, &nsegs, &sectors, max_segs,
- max_sectors)) {
+ update_aligned_sectors_and_segs(nsegs, sectors,
+ &aligned_segs,
+ &aligned_sectors,
+ bio_sectors_alignment);
+ } else if (bvec_split_segs(q, &bv, &nsegs, &sectors,
+ &aligned_segs, &aligned_sectors,
+ bio_sectors_alignment, max_segs,
+ max_sectors)) {
goto split;
}
@@ -281,11 +333,18 @@ static int blk_bio_segment_split(struct request_queue *q,
bvprvp = &bvprv;
}
+ /*
+ * The input bio's number of sectors is assumed to be aligned to
+ * bio_sectors_alignment. If that's the case, then this function should
+ * ensure that aligned_segs == nsegs and aligned_sectors == sectors if
+ * the bio is not going to be split.
+ */
+ WARN_ON(aligned_segs != nsegs || aligned_sectors != sectors);
*segs = nsegs;
*split = NULL;
return 0;
split:
- *segs = nsegs;
+ *segs = aligned_segs;
/*
* Bio splitting may cause subtle trouble such as hang when doing sync
@@ -294,10 +353,9 @@ static int blk_bio_segment_split(struct request_queue *q,
*/
bio->bi_opf &= ~REQ_HIPRI;
- sectors = round_down(sectors, blk_crypto_bio_sectors_alignment(bio));
- if (WARN_ON(sectors == 0))
+ if (WARN_ON(aligned_sectors == 0))
return -EIO;
- *split = bio_split(bio, sectors, GFP_NOIO, bs);
+ *split = bio_split(bio, aligned_sectors, GFP_NOIO, bs);
return 0;
}
@@ -395,6 +453,9 @@ unsigned int blk_recalc_rq_segments(struct request *rq)
{
unsigned int nr_phys_segs = 0;
unsigned int nr_sectors = 0;
+ unsigned int nr_aligned_phys_segs = 0;
+ unsigned int nr_aligned_sectors = 0;
+ unsigned int bio_sectors_alignment;
struct req_iterator iter;
struct bio_vec bv;
@@ -410,9 +471,11 @@ unsigned int blk_recalc_rq_segments(struct request *rq)
return 1;
}
+ bio_sectors_alignment = blk_crypto_bio_sectors_alignment(rq->bio);
rq_for_each_bvec(bv, rq, iter)
bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors,
- UINT_MAX, UINT_MAX);
+ &nr_aligned_phys_segs, &nr_aligned_sectors,
+ bio_sectors_alignment, UINT_MAX, UINT_MAX);
return nr_phys_segs;
}
--
2.30.0.284.gd98b1dd5eaa7-goog
Powered by blists - more mailing lists