Message-Id: <20181115085306.9910-5-ming.lei@redhat.com>
Date: Thu, 15 Nov 2018 16:52:51 +0800
From: Ming Lei <ming.lei@...hat.com>
To: Jens Axboe <axboe@...nel.dk>
Cc: linux-block@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, Ming Lei <ming.lei@...hat.com>,
Dave Chinner <dchinner@...hat.com>,
Kent Overstreet <kent.overstreet@...il.com>,
Mike Snitzer <snitzer@...hat.com>, dm-devel@...hat.com,
Alexander Viro <viro@...iv.linux.org.uk>,
linux-fsdevel@...r.kernel.org, Shaohua Li <shli@...nel.org>,
linux-raid@...r.kernel.org, linux-erofs@...ts.ozlabs.org,
David Sterba <dsterba@...e.com>, linux-btrfs@...r.kernel.org,
"Darrick J . Wong" <darrick.wong@...cle.com>,
linux-xfs@...r.kernel.org, Gao Xiang <gaoxiang25@...wei.com>,
Christoph Hellwig <hch@....de>, Theodore Ts'o <tytso@....edu>,
linux-ext4@...r.kernel.org, Coly Li <colyli@...e.de>,
linux-bcache@...r.kernel.org, Boaz Harrosh <ooo@...ctrozaur.com>,
Bob Peterson <rpeterso@...hat.com>, cluster-devel@...hat.com
Subject: [PATCH V10 04/19] block: use bio_for_each_bvec() to map sg
It is more efficient to use bio_for_each_bvec() to map sg, since it
iterates over whole multi-page bvecs instead of single-page segments;
meanwhile each multi-page bvec has to be split with respect to the
queue's max segment size, as is already done in blk_bio_segment_split().
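
For illustration only (not part of the patch): the splitting arithmetic
of the new blk_bvec_map_sg() helper can be modelled in plain userspace C.
PAGE_SIZE and MAX_SEG below are assumed example values standing in for
the real page size and queue_max_segment_size(q), and min_u() stands in
for the kernel's min():

	#include <stdio.h>

	#define PAGE_SIZE	4096u	/* assumed 4K page size */
	#define MAX_SEG		8192u	/* assumed queue_max_segment_size(q) */

	static unsigned min_u(unsigned a, unsigned b)
	{
		return a < b ? a : b;
	}

	int main(void)
	{
		/* one multi-page bvec: 20K of data, starting 512 bytes
		 * into its first page
		 */
		unsigned bv_offset = 512, bv_len = 20480;
		unsigned nbytes = bv_len, total = 0, nsegs = 0;

		while (nbytes > 0) {
			unsigned seg_size = min_u(nbytes, MAX_SEG);
			unsigned offset = (total + bv_offset) % PAGE_SIZE;
			unsigned idx = (total + bv_offset) / PAGE_SIZE;

			/* stands in for sg_set_page(*sg,
			 * nth_page(bv_page, idx), seg_size, offset);
			 * an sg entry may span pages here because the
			 * pages backing a multi-page bvec are
			 * physically contiguous
			 */
			printf("sg[%u]: page idx %u, offset %u, len %u\n",
			       nsegs, idx, offset, seg_size);

			total += seg_size;
			nbytes -= seg_size;
			nsegs++;
		}
		printf("-> %u sg entries\n", nsegs);
		return 0;
	}

With these assumed values the 20K bvec is mapped with three sg entries,
while bio_for_each_segment() would have walked the same data as six
single-page bvecs.
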
Cc: Dave Chinner <dchinner@...hat.com>
Cc: Kent Overstreet <kent.overstreet@...il.com>
Cc: Mike Snitzer <snitzer@...hat.com>
Cc: dm-devel@...hat.com
Cc: Alexander Viro <viro@...iv.linux.org.uk>
Cc: linux-fsdevel@...r.kernel.org
Cc: Shaohua Li <shli@...nel.org>
Cc: linux-raid@...r.kernel.org
Cc: linux-erofs@...ts.ozlabs.org
Cc: David Sterba <dsterba@...e.com>
Cc: linux-btrfs@...r.kernel.org
Cc: Darrick J. Wong <darrick.wong@...cle.com>
Cc: linux-xfs@...r.kernel.org
Cc: Gao Xiang <gaoxiang25@...wei.com>
Cc: Christoph Hellwig <hch@....de>
Cc: Theodore Ts'o <tytso@....edu>
Cc: linux-ext4@...r.kernel.org
Cc: Coly Li <colyli@...e.de>
Cc: linux-bcache@...r.kernel.org
Cc: Boaz Harrosh <ooo@...ctrozaur.com>
Cc: Bob Peterson <rpeterso@...hat.com>
Cc: cluster-devel@...hat.com
Signed-off-by: Ming Lei <ming.lei@...hat.com>
---
block/blk-merge.c | 72 +++++++++++++++++++++++++++++++++++++++----------------
1 file changed, 52 insertions(+), 20 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 6f7deb94a23f..cb9f49bcfd36 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -473,6 +473,56 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 	return biovec_phys_mergeable(q, &end_bv, &nxt_bv);
 }
 
+static struct scatterlist *blk_next_sg(struct scatterlist **sg,
+		struct scatterlist *sglist)
+{
+	if (!*sg)
+		return sglist;
+	else {
+		/*
+		 * If the driver previously mapped a shorter
+		 * list, we could see a termination bit
+		 * prematurely unless it fully inits the sg
+		 * table on each mapping. We KNOW that there
+		 * must be more entries here or the driver
+		 * would be buggy, so force clear the
+		 * termination bit to avoid doing a full
+		 * sg_init_table() in drivers for each command.
+		 */
+		sg_unmark_end(*sg);
+		return sg_next(*sg);
+	}
+}
+
+static unsigned blk_bvec_map_sg(struct request_queue *q,
+		struct bio_vec *bvec, struct scatterlist *sglist,
+		struct scatterlist **sg)
+{
+	unsigned nbytes = bvec->bv_len;
+	unsigned nsegs = 0, total = 0;
+
+	while (nbytes > 0) {
+		unsigned seg_size;
+		struct page *pg;
+		unsigned offset, idx;
+
+		*sg = blk_next_sg(sg, sglist);
+
+		seg_size = min(nbytes, queue_max_segment_size(q));
+		offset = (total + bvec->bv_offset) % PAGE_SIZE;
+		idx = (total + bvec->bv_offset) / PAGE_SIZE;
+		pg = nth_page(bvec->bv_page, idx);
+
+		sg_set_page(*sg, pg, seg_size, offset);
+
+		total += seg_size;
+		nbytes -= seg_size;
+		nsegs++;
+	}
+
+	return nsegs;
+}
+
 static inline void
 __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
 		     struct scatterlist *sglist, struct bio_vec *bvprv,
@@ -490,25 +540,7 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
 		(*sg)->length += nbytes;
 	} else {
 new_segment:
-		if (!*sg)
-			*sg = sglist;
-		else {
-			/*
-			 * If the driver previously mapped a shorter
-			 * list, we could see a termination bit
-			 * prematurely unless it fully inits the sg
-			 * table on each mapping. We KNOW that there
-			 * must be more entries here or the driver
-			 * would be buggy, so force clear the
-			 * termination bit to avoid doing a full
-			 * sg_init_table() in drivers for each command.
-			 */
-			sg_unmark_end(*sg);
-			*sg = sg_next(*sg);
-		}
-
-		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
-		(*nsegs)++;
+		(*nsegs) += blk_bvec_map_sg(q, bvec, sglist, sg);
 	}
 	*bvprv = *bvec;
 }
@@ -530,7 +562,7 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
 	int cluster = blk_queue_cluster(q), nsegs = 0;
 
 	for_each_bio(bio)
-		bio_for_each_segment(bvec, bio, iter)
+		bio_for_each_bvec(bvec, bio, iter)
 			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
 					     &nsegs, &cluster);
 
--
2.9.5