Message-Id: <20190620161240.22738-17-logang@deltatee.com>
Date: Thu, 20 Jun 2019 10:12:28 -0600
From: Logan Gunthorpe <logang@...tatee.com>
To: linux-kernel@...r.kernel.org, linux-block@...r.kernel.org,
linux-nvme@...ts.infradead.org, linux-pci@...r.kernel.org,
linux-rdma@...r.kernel.org
Cc: Jens Axboe <axboe@...nel.dk>, Christoph Hellwig <hch@....de>,
Bjorn Helgaas <bhelgaas@...gle.com>,
Dan Williams <dan.j.williams@...el.com>,
Sagi Grimberg <sagi@...mberg.me>,
Keith Busch <kbusch@...nel.org>,
Jason Gunthorpe <jgg@...pe.ca>,
Stephen Bates <sbates@...thlin.com>,
Logan Gunthorpe <logang@...tatee.com>
Subject: [RFC PATCH 16/28] block: Implement mapping dma-direct requests to SGs in blk_rq_map_sg()

blk_rq_map_sg() just needs to copy each dma_vec entry into the
dma_address and dma_length of the sgl. Callers will need to ensure
they do not call dma_map_sg() for dma-direct requests.
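For example, a caller might end up looking something like the
following (a rough sketch only, not part of this patch; 'dev',
'iod->sg' and 'dma_dir' are placeholder names):

	nents = blk_rq_map_sg(q, rq, iod->sg);
	if (!blk_rq_is_dma_direct(rq))
		nents = dma_map_sg(dev, iod->sg, nents, dma_dir);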

This will likely get less ugly with Christoph's proposed cleanup of
the DMA API. It would be much simpler if drivers could just call a
dma_map_bio() and not have to worry about dma-direct requests
themselves.
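(Purely as a speculative illustration: dma_map_bio() does not exist
yet and the signature below is only a guess, but a driver loop could
then collapse to something like

	for_each_bio(bio)
		nents += dma_map_bio(dev, bio, sgl + nents, dma_dir);

with the dma-direct special casing hidden inside the mapping helper.)
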
Signed-off-by: Logan Gunthorpe <logang@...tatee.com>
---
block/blk-merge.c | 65 +++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 65 insertions(+)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index a7a5453987f9..ccd6c44b9f6e 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -545,6 +545,69 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
return nsegs;
}
+static unsigned blk_dvec_map_sg(struct request_queue *q,
+ struct dma_vec *dvec, struct scatterlist *sglist,
+ struct scatterlist **sg)
+{
+ unsigned nbytes = dvec->dv_len;
+ unsigned nsegs = 0, total = 0;
+
+ while (nbytes > 0) {
+ unsigned seg_size;
+
+ *sg = blk_next_sg(sg, sglist);
+
+ seg_size = get_max_segment_size(q, total);
+ seg_size = min(nbytes, seg_size);
+
+ (*sg)->dma_address = dvec->dv_addr + total;
+ sg_dma_len(*sg) = seg_size;
+
+ total += seg_size;
+ nbytes -= seg_size;
+ nsegs++;
+ }
+
+ return nsegs;
+}
+
+static inline void
+__blk_segment_dma_map_sg(struct request_queue *q, struct dma_vec *dvec,
+ struct scatterlist *sglist, struct dma_vec *dvprv,
+ struct scatterlist **sg, int *nsegs)
+{
+ int nbytes = dvec->dv_len;
+
+ if (*sg) {
+ if ((*sg)->length + nbytes > queue_max_segment_size(q))
+ goto new_segment;
+ if (!dmavec_phys_mergeable(q, dvprv, dvec))
+ goto new_segment;
+
+ (*sg)->length += nbytes;
+ } else {
+new_segment:
+ (*nsegs) += blk_dvec_map_sg(q, dvec, sglist, sg);
+ }
+ *dvprv = *dvec;
+}
+
+static int __blk_dma_bios_map_sg(struct request_queue *q, struct bio *bio,
+ struct scatterlist *sglist,
+ struct scatterlist **sg)
+{
+ struct dma_vec dvec, dvprv = {};
+ struct bvec_iter iter;
+ int nsegs = 0;
+
+ for_each_bio(bio)
+ bio_for_each_dvec(dvec, bio, iter)
+ __blk_segment_dma_map_sg(q, &dvec, sglist, &dvprv,
+ sg, &nsegs);
+
+ return nsegs;
+}
+
/*
* map a request to scatterlist, return number of sg entries setup. Caller
* must make sure sg can hold rq->nr_phys_segments entries
@@ -559,6 +622,8 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, &sg);
else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, &sg);
+ else if (blk_rq_is_dma_direct(rq))
+ nsegs = __blk_dma_bios_map_sg(q, rq->bio, sglist, &sg);
else if (rq->bio)
nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
--
2.20.1