Message-Id: <20211117215410.3695-12-logang@deltatee.com>
Date: Wed, 17 Nov 2021 14:53:58 -0700
From: Logan Gunthorpe <logang@...tatee.com>
To: linux-kernel@...r.kernel.org, linux-nvme@...ts.infradead.org,
	linux-block@...r.kernel.org, linux-pci@...r.kernel.org,
	linux-mm@...ck.org, iommu@...ts.linux-foundation.org
Cc: Stephen Bates <sbates@...thlin.com>,
	Christoph Hellwig <hch@....de>,
	Dan Williams <dan.j.williams@...el.com>,
	Jason Gunthorpe <jgg@...pe.ca>,
	Christian König <christian.koenig@....com>,
	John Hubbard <jhubbard@...dia.com>,
	Don Dutile <ddutile@...hat.com>,
	Matthew Wilcox <willy@...radead.org>,
	Daniel Vetter <daniel.vetter@...ll.ch>,
	Jakowski Andrzej <andrzej.jakowski@...el.com>,
	Minturn Dave B <dave.b.minturn@...el.com>,
	Jason Ekstrand <jason@...kstrand.net>,
	Dave Hansen <dave.hansen@...ux.intel.com>,
	Xiong Jianxin <jianxin.xiong@...el.com>,
	Bjorn Helgaas <helgaas@...nel.org>,
	Ira Weiny <ira.weiny@...el.com>,
	Robin Murphy <robin.murphy@....com>,
	Martin Oliveira <martin.oliveira@...eticom.com>,
	Chaitanya Kulkarni <ckulkarnilinux@...il.com>,
	Logan Gunthorpe <logang@...tatee.com>,
	Max Gurtovoy <mgurtovoy@...dia.com>
Subject: [PATCH v4 11/23] nvme-pci: convert to using dma_map_sgtable()

The dma_map operations now support P2PDMA pages directly, so remove
the calls to pci_p2pdma_[un]map_sg_attrs() and replace them with calls
to dma_map_sgtable().

dma_map_sgtable() returns more complete error codes than dma_map_sg()
and allows differentiating an -EREMOTEIO error, which indicates that an
unsupported P2PDMA transfer was requested. When that happens, return
BLK_STS_TARGET so the request isn't retried.

Signed-off-by: Logan Gunthorpe <logang@...tatee.com>
Reviewed-by: Max Gurtovoy <mgurtovoy@...dia.com>
---
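(Note below the fold, not part of the commit message: a minimal sketch
of the error-handling pattern this patch adopts, for reviewers less
familiar with the sg_table API. sketch_map_payload() is a hypothetical
stand-in for the driver's real mapping path, and the use of -EREMOTEIO
for an unsupported P2PDMA transfer is as described above.)

#include <linux/blk_types.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Sketch only: dma_map_sgtable() returns 0 or a negative errno
 * instead of a mapped-entry count, letting the caller tell a
 * transient mapping failure apart from a permanent one. The
 * sg_table also tracks both entry counts itself: orig_nents is
 * what the caller built, nents is what was actually DMA-mapped.
 */
static blk_status_t sketch_map_payload(struct device *dev,
				       struct sg_table *sgt,
				       enum dma_data_direction dir)
{
	int rc = dma_map_sgtable(dev, sgt, dir, DMA_ATTR_NO_WARN);

	if (rc == -EREMOTEIO)		/* unsupported P2PDMA transfer */
		return BLK_STS_TARGET;	/* permanent: fail, don't retry */
	if (rc)
		return BLK_STS_RESOURCE; /* transient: may be retried */

	/*
	 * ... build PRPs or SGLs from sgt->sgl here. Teardown is
	 * shown inline for completeness; a real driver unmaps at
	 * request completion:
	 */
	dma_unmap_sgtable(dev, sgt, dir, 0);
	return BLK_STS_OK;
}

The distinction matters because BLK_STS_RESOURCE asks the block layer
to requeue the request; a P2PDMA failure imposed by the PCI topology
would otherwise be retried forever.
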
 drivers/nvme/host/pci.c | 69 +++++++++++++++++------------------------
 1 file changed, 29 insertions(+), 40 deletions(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 72f623999ba5..3f2bd1efe076 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -229,11 +229,10 @@ struct nvme_iod {
 	bool use_sgl;
 	int aborted;
 	int npages;		/* In the PRP list. 0 means small pool in use */
-	int nents;		/* Used in scatterlist */
 	dma_addr_t first_dma;
 	unsigned int dma_len;	/* length of single DMA segment mapping */
 	dma_addr_t meta_dma;
-	struct scatterlist *sg;
+	struct sg_table sgt;
 };
 
 static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
@@ -531,7 +530,7 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
 static void **nvme_pci_iod_list(struct request *req)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
+	return (void **)(iod->sgt.sgl + blk_rq_nr_phys_segments(req));
 }
 
 static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
@@ -583,17 +582,6 @@ static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
 	}
 }
 
-static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
-{
-	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-
-	if (is_pci_p2pdma_page(sg_page(iod->sg)))
-		pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
-				    rq_dma_dir(req));
-	else
-		dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
-}
-
 static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -604,9 +592,10 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 		return;
 	}
 
-	WARN_ON_ONCE(!iod->nents);
+	WARN_ON_ONCE(!iod->sgt.nents);
+
+	dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
 
-	nvme_unmap_sg(dev, req);
 	if (iod->npages == 0)
 		dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
 			      iod->first_dma);
@@ -614,7 +603,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 		nvme_free_sgls(dev, req);
 	else
 		nvme_free_prps(dev, req);
-	mempool_free(iod->sg, dev->iod_mempool);
+	mempool_free(iod->sgt.sgl, dev->iod_mempool);
 }
 
 static void nvme_print_sgl(struct scatterlist *sgl, int nents)
@@ -637,7 +626,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct dma_pool *pool;
 	int length = blk_rq_payload_bytes(req);
-	struct scatterlist *sg = iod->sg;
+	struct scatterlist *sg = iod->sgt.sgl;
 	int dma_len = sg_dma_len(sg);
 	u64 dma_addr = sg_dma_address(sg);
 	int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
@@ -710,16 +699,16 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 		dma_len = sg_dma_len(sg);
 	}
 done:
-	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl));
 	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
 	return BLK_STS_OK;
 free_prps:
 	nvme_free_prps(dev, req);
 	return BLK_STS_RESOURCE;
 bad_sgl:
-	WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
+	WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents),
 			"Invalid SGL for payload:%d nents:%d\n",
-			blk_rq_payload_bytes(req), iod->nents);
+			blk_rq_payload_bytes(req), iod->sgt.nents);
 	return BLK_STS_IOERR;
 }
 
@@ -745,12 +734,13 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
 }
 
 static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
-		struct request *req, struct nvme_rw_command *cmd, int entries)
+		struct request *req, struct nvme_rw_command *cmd)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct dma_pool *pool;
 	struct nvme_sgl_desc *sg_list;
-	struct scatterlist *sg = iod->sg;
+	struct scatterlist *sg = iod->sgt.sgl;
+	int entries = iod->sgt.nents;
 	dma_addr_t sgl_dma;
 	int i = 0;
 
@@ -848,7 +838,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	blk_status_t ret = BLK_STS_RESOURCE;
-	int nr_mapped;
+	int rc;
 
 	if (blk_rq_nr_phys_segments(req) == 1) {
 		struct bio_vec bv = req_bvec(req);
@@ -866,26 +856,25 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 	}
 
 	iod->dma_len = 0;
-	iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
-	if (!iod->sg)
+	iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
+	if (!iod->sgt.sgl)
 		return BLK_STS_RESOURCE;
-	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
-	iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
-	if (!iod->nents)
+	sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
+	iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
+	if (!iod->sgt.orig_nents)
 		goto out_free_sg;
 
-	if (is_pci_p2pdma_page(sg_page(iod->sg)))
-		nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
-				iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN);
-	else
-		nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
-					     rq_dma_dir(req), DMA_ATTR_NO_WARN);
-	if (!nr_mapped)
+	rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
+			     DMA_ATTR_NO_WARN);
+	if (rc) {
+		if (rc == -EREMOTEIO)
+			ret = BLK_STS_TARGET;
 		goto out_free_sg;
+	}
 
 	iod->use_sgl = nvme_pci_use_sgls(dev, req);
 	if (iod->use_sgl)
-		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
+		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
 	else
 		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
 	if (ret != BLK_STS_OK)
@@ -893,9 +882,9 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 	return BLK_STS_OK;
 
 out_unmap_sg:
-	nvme_unmap_sg(dev, req);
+	dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
 out_free_sg:
-	mempool_free(iod->sg, dev->iod_mempool);
+	mempool_free(iod->sgt.sgl, dev->iod_mempool);
 	return ret;
 }
 
@@ -928,7 +917,7 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	iod->aborted = 0;
 	iod->npages = -1;
-	iod->nents = 0;
+	iod->sgt.nents = 0;
 
 	/*
 	 * We should not need to do this, but we're still using this to
--
2.30.2