Message-ID: <20230728075753epcms2p7354d445f1888ab3942a16e26a13d5bbd@epcms2p7>
Date: Fri, 28 Jul 2023 16:57:53 +0900
From: Jinyoung Choi <j-young.choi@...sung.com>
To: "hch@....de" <hch@....de>,
"martin.petersen@...cle.com" <martin.petersen@...cle.com>,
"axboe@...nel.dk" <axboe@...nel.dk>,
"sagi@...mberg.me" <sagi@...mberg.me>,
"kbusch@...nel.org" <kbusch@...nel.org>,
"chaitanya.kulkarni@....com" <chaitanya.kulkarni@....com>,
"linux-block@...r.kernel.org" <linux-block@...r.kernel.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Subject: [PATCH 2/2] bio-integrity: create multi-page bvecs in bio_integrity_add_page()

Allow bio_integrity_add_page to create multi-page bvecs, just like
the bio payloads. This simplifies adding larger payloads, and fixes
support for non-tiny workloads with nvme, which stopped using a
scatterlist for metadata a while ago.

Cc: Christoph Hellwig <hch@....de>
Cc: Martin K. Petersen <martin.petersen@...cle.com>
Fixes: 783b94bd9250 ("nvme-pci: do not build a scatterlist to map metadata")
Signed-off-by: Jinyoung Choi <j-young.choi@...sung.com>
---
block/bio-integrity.c | 50 ++++++++++++++++++++++++-------------------
1 file changed, 28 insertions(+), 22 deletions(-)
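
Not part of the patch itself, but to make the intended behavior concrete for
reviewers, here is a hedged sketch (the helper function and its calling
context are hypothetical) of what the merge path means for a physically
contiguous metadata buffer spanning two pages:

/*
 * Illustrative sketch only, not part of this patch: add a physically
 * contiguous two-page buffer (e.g. from kmalloc()) one page at a time.
 */
static void example_add_contiguous_pages(struct bio *bio, void *buf)
{
	bio_integrity_add_page(bio, virt_to_page(buf), PAGE_SIZE,
			       offset_in_page(buf));
	bio_integrity_add_page(bio, virt_to_page(buf + PAGE_SIZE),
			       PAGE_SIZE, 0);

	/*
	 * Before this patch: bio_integrity(bio)->bip_vcnt == 2, one
	 * single-page bvec per call.
	 * After this patch:  bip_vcnt is expected to stay at 1, with
	 * bip_iter.bi_size covering both pages, assuming the queue's
	 * segment size/boundary limits allow the merge.
	 */
}

This mirrors what bio_integrity_prep() does below with its kmalloc()ed
buffer, which is also why the bi_size accounting moves into
bio_integrity_add_page().
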
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 045553a164e0..ae054c6a06cb 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -123,21 +123,38 @@ void bio_integrity_free(struct bio *bio)
 int bio_integrity_add_page(struct bio *bio, struct page *page,
 			   unsigned int len, unsigned int offset)
 {
+	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 	struct bio_integrity_payload *bip = bio_integrity(bio);
 
-	if (bip->bip_vcnt >= bip->bip_max_vcnt) {
-		printk(KERN_ERR "%s: bip_vec full\n", __func__);
+	if (((bip->bip_iter.bi_size + len) >> SECTOR_SHIFT) >
+	    queue_max_hw_sectors(q))
 		return 0;
-	}
 
-	if (bip->bip_vcnt &&
-	    bvec_gap_to_prev(&bdev_get_queue(bio->bi_bdev)->limits,
-			     &bip->bip_vec[bip->bip_vcnt - 1], offset))
-		return 0;
+	if (bip->bip_vcnt > 0) {
+		struct bio_vec *bv = &bip->bip_vec[bip->bip_vcnt - 1];
+		bool same_page = false;
+
+		if (bvec_try_merge_hw_page(q, bv, page, len, offset,
+					   &same_page)) {
+			bip->bip_iter.bi_size += len;
+			return len;
+		}
+
+		if (bip->bip_vcnt >=
+		    min(bip->bip_max_vcnt, queue_max_integrity_segments(q)))
+			return 0;
+
+		/*
+		 * If the queue doesn't support SG gaps and adding this segment
+		 * would create a gap, disallow it.
+		 */
+		if (bvec_gap_to_prev(&q->limits, bv, offset))
+			return 0;
+	}
 
 	bvec_set_page(&bip->bip_vec[bip->bip_vcnt], page, len, offset);
 	bip->bip_vcnt++;
-
+	bip->bip_iter.bi_size += len;
 	return len;
 }
 EXPORT_SYMBOL(bio_integrity_add_page);
@@ -244,7 +261,6 @@ bool bio_integrity_prep(struct bio *bio)
 	}
 
 	bip->bip_flags |= BIP_BLOCK_INTEGRITY;
-	bip->bip_iter.bi_size = len;
 	bip_set_seed(bip, bio->bi_iter.bi_sector);
 
 	if (bi->flags & BLK_INTEGRITY_IP_CHECKSUM)
@@ -252,27 +268,18 @@ bool bio_integrity_prep(struct bio *bio)
 
 	/* Map it */
 	offset = offset_in_page(buf);
-	for (i = 0 ; i < nr_pages ; i++) {
-		int ret;
+	for (i = 0; i < nr_pages && len > 0; i++) {
 		bytes = PAGE_SIZE - offset;
 
-		if (len <= 0)
-			break;
-
 		if (bytes > len)
 			bytes = len;
 
-		ret = bio_integrity_add_page(bio, virt_to_page(buf),
-					     bytes, offset);
-
-		if (ret == 0) {
+		if (bio_integrity_add_page(bio, virt_to_page(buf),
+					   bytes, offset) < bytes) {
 			printk(KERN_ERR "could not attach integrity payload\n");
 			goto err_end_io;
 		}
 
-		if (ret < bytes)
-			break;
-
 		buf += bytes;
 		len -= bytes;
 		offset = 0;
@@ -291,7 +298,6 @@ bool bio_integrity_prep(struct bio *bio)
 	bio->bi_status = BLK_STS_RESOURCE;
 	bio_endio(bio);
 	return false;
-
 }
 EXPORT_SYMBOL(bio_integrity_prep);
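
A note on the Fixes tag, for reviewers who have not looked at the nvme side:
since commit 783b94bd9250 nvme-pci no longer builds a scatterlist for
metadata and instead DMA-maps the request's single integrity bvec, roughly
along the following lines (paraphrased from memory, not verbatim driver
code):

	/* drivers/nvme/host/pci.c, paraphrased: metadata mapped as one bvec */
	iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
				     rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->meta_dma))
		return BLK_STS_IOERR;
	cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);

With single-page integrity bvecs, a metadata buffer larger than a page needs
more than one segment, which this path cannot express; building multi-page
bvecs in bio_integrity_add_page() keeps such payloads in a single segment.
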
--
2.34.1