Message-Id: <4eedad1efab91f4529de19e14ba374da405aea3f.1593340208.git.baolin.wang@linux.alibaba.com>
Date: Sun, 28 Jun 2020 18:34:46 +0800
From: Baolin Wang <baolin.wang@...ux.alibaba.com>
To: kbusch@...nel.org, axboe@...com, hch@....de, sagi@...mberg.me
Cc: baolin.wang@...ux.alibaba.com, baolin.wang7@...il.com,
linux-nvme@...ts.infradead.org, linux-kernel@...r.kernel.org
Subject: [RFC PATCH] nvme-pci: Move the sg table allocation/free into init/exit_request
Move the sg table allocation and freeing into init_request() and
exit_request(), instead of allocating the sg table every time a request
is queued. This takes the mempool allocation out of the I/O submission
hot path, which can benefit I/O performance.
Signed-off-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
---
drivers/nvme/host/pci.c | 24 ++++++++++++++++++------
1 file changed, 18 insertions(+), 6 deletions(-)
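For reference, a minimal sketch of the blk-mq callback lifecycle this
change relies on. The .init_request/.exit_request/.queue_rq signatures
below follow the upstream struct blk_mq_ops; the demo_* names and the
segment count of 4 are illustrative only and are not taken from the nvme
driver. The point is that .init_request/.exit_request run once per
request when the tag set is allocated or freed, while .queue_rq runs on
every I/O submission, so a per-request buffer allocated in .init_request
never has to be allocated in the submission path.

#include <linux/blk-mq.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct demo_cmd {			/* hypothetical per-request pdu */
	struct scatterlist *sg;
};

/* Runs once per request when the tag set is allocated (process context). */
static int demo_init_request(struct blk_mq_tag_set *set, struct request *rq,
			     unsigned int hctx_idx, unsigned int numa_node)
{
	struct demo_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->sg = kcalloc(4, sizeof(*cmd->sg), GFP_KERNEL);	/* 4 segments, illustrative */
	if (!cmd->sg)
		return -ENOMEM;

	sg_init_table(cmd->sg, 4);
	return 0;
}

/* Runs once per request when the tag set is freed. */
static void demo_exit_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx)
{
	struct demo_cmd *cmd = blk_mq_rq_to_pdu(rq);

	kfree(cmd->sg);
}

/* Runs on every I/O submission; no per-request allocation needed here. */
static blk_status_t demo_queue_rq(struct blk_mq_hw_ctx *hctx,
				  const struct blk_mq_queue_data *bd)
{
	return BLK_STS_OK;
}

static const struct blk_mq_ops demo_mq_ops = {
	.queue_rq	= demo_queue_rq,
	.init_request	= demo_init_request,
	.exit_request	= demo_exit_request,
};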
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index b1d18f0..cf7c997 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -410,9 +410,25 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
iod->nvmeq = nvmeq;
nvme_req(req)->ctrl = &dev->ctrl;
+
+ iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
+ if (!iod->sg)
+ return -ENOMEM;
+
+ sg_init_table(iod->sg, NVME_MAX_SEGS);
return 0;
}
+static void nvme_exit_request(struct blk_mq_tag_set *set, struct request *req,
+ unsigned int hctx_idx)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_dev *dev = set->driver_data;
+
+ mempool_free(iod->sg, dev->iod_mempool);
+ iod->sg = NULL;
+}
+
static int queue_irq_offset(struct nvme_dev *dev)
{
/* if we have more than 1 vec, admin queue offsets us by 1 */
@@ -557,8 +573,6 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
dma_pool_free(dev->prp_page_pool, addr, dma_addr);
dma_addr = next_dma_addr;
}
-
- mempool_free(iod->sg, dev->iod_mempool);
}
static void nvme_print_sgl(struct scatterlist *sgl, int nents)
@@ -808,10 +822,6 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
}
iod->dma_len = 0;
- iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
- if (!iod->sg)
- return BLK_STS_RESOURCE;
- sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
if (!iod->nents)
goto out;
@@ -1557,6 +1567,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
.complete = nvme_pci_complete_rq,
.init_hctx = nvme_admin_init_hctx,
.init_request = nvme_init_request,
+ .exit_request = nvme_exit_request,
.timeout = nvme_timeout,
};
@@ -1566,6 +1577,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
.commit_rqs = nvme_commit_rqs,
.init_hctx = nvme_init_hctx,
.init_request = nvme_init_request,
+ .exit_request = nvme_exit_request,
.map_queues = nvme_pci_map_queues,
.timeout = nvme_timeout,
.poll = nvme_poll,
--
1.8.3.1