Message-ID: <1284adf3-7e93-4530-9921-408c5eaeb337@kernel.org>
Date: Fri, 18 Apr 2025 17:02:38 +0900
From: Damien Le Moal <dlemoal@...nel.org>
To: Leon Romanovsky <leon@...nel.org>,
Marek Szyprowski <m.szyprowski@...sung.com>, Jens Axboe <axboe@...nel.dk>,
Christoph Hellwig <hch@....de>, Keith Busch <kbusch@...nel.org>
Cc: Kanchan Joshi <joshi.k@...sung.com>, Jake Edge <jake@....net>,
Jonathan Corbet <corbet@....net>, Jason Gunthorpe <jgg@...pe.ca>,
Zhu Yanjun <zyjzyj2000@...il.com>, Robin Murphy <robin.murphy@....com>,
Joerg Roedel <joro@...tes.org>, Will Deacon <will@...nel.org>,
Sagi Grimberg <sagi@...mberg.me>, Bjorn Helgaas <bhelgaas@...gle.com>,
Logan Gunthorpe <logang@...tatee.com>, Yishai Hadas <yishaih@...dia.com>,
Shameer Kolothum <shameerali.kolothum.thodi@...wei.com>,
Kevin Tian <kevin.tian@...el.com>,
Alex Williamson <alex.williamson@...hat.com>,
Jérôme Glisse <jglisse@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>, linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-block@...r.kernel.org,
linux-rdma@...r.kernel.org, iommu@...ts.linux.dev,
linux-nvme@...ts.infradead.org, linux-pci@...r.kernel.org,
kvm@...r.kernel.org, linux-mm@...ck.org,
Niklas Schnelle <schnelle@...ux.ibm.com>,
Chuck Lever <chuck.lever@...cle.com>, Luis Chamberlain <mcgrof@...nel.org>,
Matthew Wilcox <willy@...radead.org>, Dan Williams
<dan.j.williams@...el.com>, Chaitanya Kulkarni <kch@...dia.com>,
Nitesh Shetty <nj.shetty@...sung.com>, Leon Romanovsky <leonro@...dia.com>
Subject: Re: [PATCH v8 24/24] nvme-pci: optimize single-segment handling
On 4/18/25 15:47, Leon Romanovsky wrote:
> From: Kanchan Joshi <joshi.k@...sung.com>
>
> blk_rq_dma_map API is costly for single-segment requests.
> Avoid using it and map the bio_vec directly.
>
> Signed-off-by: Kanchan Joshi <joshi.k@...sung.com>
> Signed-off-by: Nitesh Shetty <nj.shetty@...sung.com>
> Signed-off-by: Leon Romanovsky <leonro@...dia.com>
> ---
> drivers/nvme/host/pci.c | 65 +++++++++++++++++++++++++++++++++++++----
> 1 file changed, 60 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
> index 8d99a8f871ea..cf020de82962 100644
> --- a/drivers/nvme/host/pci.c
> +++ b/drivers/nvme/host/pci.c
> @@ -216,6 +216,11 @@ struct nvme_queue {
> struct completion delete_done;
> };
>
> +enum {
> + IOD_LARGE_DESCRIPTORS = 1, /* uses the full page sized descriptor pool */
> + IOD_SINGLE_SEGMENT = 2, /* single segment dma mapping */
> +};
> +
> /*
> * The nvme_iod describes the data in an I/O.
> */
> @@ -224,7 +229,7 @@ struct nvme_iod {
> struct nvme_command cmd;
> bool aborted;
> u8 nr_descriptors; /* # of PRP/SGL descriptors */
> - bool large_descriptors; /* uses the full page sized descriptor pool */
> + unsigned int flags;
> unsigned int total_len; /* length of the entire transfer */
> unsigned int total_meta_len; /* length of the entire metadata transfer */
> dma_addr_t meta_dma;
> @@ -529,7 +534,7 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req,
> static inline struct dma_pool *nvme_dma_pool(struct nvme_dev *dev,
> struct nvme_iod *iod)
> {
> - if (iod->large_descriptors)
> + if (iod->flags & IOD_LARGE_DESCRIPTORS)
> return dev->prp_page_pool;
> return dev->prp_small_pool;
> }
> @@ -630,6 +635,15 @@ static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
> static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
> {
> struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
> + unsigned int nr_segments = blk_rq_nr_phys_segments(req);
> + dma_addr_t dma_addr;
> +
> + if (nr_segments == 1 && (iod->flags & IOD_SINGLE_SEGMENT)) {
nvme_pci_setup_prps() calls nvme_try_setup_prp_simple(), which sets
IOD_SINGLE_SEGMENT if and only if the req has a single phys segment. So why do
you need to count the segments again here? Looking at the flag alone should be
enough, no?
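
That is, an untested sketch of what I mean, on top of this patch:

	if (iod->flags & IOD_SINGLE_SEGMENT) {
		dma_addr = le64_to_cpu(iod->cmd.common.dptr.prp1);
		dma_unmap_page(dev->dev, dma_addr, iod->total_len,
			       rq_dma_dir(req));
		return;
	}

This would also let you drop the nr_segments local in this function.
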
> + dma_addr = le64_to_cpu(iod->cmd.common.dptr.prp1);
> + dma_unmap_page(dev->dev, dma_addr, iod->total_len,
> + rq_dma_dir(req));
> + return;
> + }
>
> if (!blk_rq_dma_unmap(req, dev->dev, &iod->dma_state, iod->total_len)) {
> if (iod->cmd.common.flags & NVME_CMD_SGL_METABUF)
> @@ -642,6 +656,41 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
> nvme_free_descriptors(dev, req);
> }
>
> +static bool nvme_try_setup_prp_simple(struct nvme_dev *dev, struct request *req,
> + struct nvme_rw_command *cmnd,
> + struct blk_dma_iter *iter)
> +{
> + struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
> + struct bio_vec bv = req_bvec(req);
> + unsigned int first_prp_len;
> +
> + if (is_pci_p2pdma_page(bv.bv_page))
> + return false;
> + if ((bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1)) + bv.bv_len >
> + NVME_CTRL_PAGE_SIZE * 2)
> + return false;
> +
> + iter->addr = dma_map_bvec(dev->dev, &bv, rq_dma_dir(req), 0);
> + if (dma_mapping_error(dev->dev, iter->addr)) {
> + iter->status = BLK_STS_RESOURCE;
> + goto out;
> + }
> + iod->total_len = bv.bv_len;
> + cmnd->dptr.prp1 = cpu_to_le64(iter->addr);
> +
> + first_prp_len = NVME_CTRL_PAGE_SIZE -
> + (bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1));
> + if (bv.bv_len > first_prp_len)
> + cmnd->dptr.prp2 = cpu_to_le64(iter->addr + first_prp_len);
> + else
> + cmnd->dptr.prp2 = 0;
> +
> + iter->status = BLK_STS_OK;
> + iod->flags |= IOD_SINGLE_SEGMENT;
> +out:
> + return true;
> +}
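
As a side note for readers: the offset check above is what guarantees that the
simple path never needs more than two PRP entries, so prp1 and prp2 in the
command itself are always sufficient and no descriptor pool allocation is
needed. A stand-alone illustration of that arithmetic (hypothetical helper,
not part of the patch):

	/*
	 * A segment fits in two PRP entries iff the data, starting at
	 * its offset into the first controller page, ends within the
	 * following page. E.g. with a 4096-byte controller page,
	 * offset = 4095 and len = 4097 still fit:
	 * (4095 & 4095) + 4097 = 8192 = 2 * 4096, leaving PRP1 with
	 * 1 byte and PRP2 with exactly one full page.
	 */
	static bool fits_two_prps(unsigned int offset, unsigned int len,
				  unsigned int ctrl_page_size)
	{
		return (offset & (ctrl_page_size - 1)) + len <=
			ctrl_page_size * 2;
	}
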
> +
> static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
> struct request *req)
> {
> @@ -652,6 +701,12 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
> dma_addr_t prp1_dma, prp2_dma = 0;
> unsigned int prp_len, i;
> __le64 *prp_list;
> + unsigned int nr_segments = blk_rq_nr_phys_segments(req);
> +
> + if (nr_segments == 1) {
> + if (nvme_try_setup_prp_simple(dev, req, cmnd, &iter))
> + return iter.status;
> + }
>
> if (!blk_rq_dma_map_iter_start(req, dev->dev, &iod->dma_state, &iter))
> return iter.status;
> @@ -693,7 +748,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
>
> if (DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE) >
> NVME_SMALL_DESCRIPTOR_SIZE / sizeof(__le64))
> - iod->large_descriptors = true;
> + iod->flags |= IOD_LARGE_DESCRIPTORS;
>
> prp_list = dma_pool_alloc(nvme_dma_pool(dev, iod), GFP_ATOMIC,
> &prp2_dma);
> @@ -808,7 +863,7 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
> }
>
> if (entries > NVME_SMALL_DESCRIPTOR_SIZE / sizeof(*sg_list))
> - iod->large_descriptors = true;
> + iod->flags |= IOD_LARGE_DESCRIPTORS;
>
> sg_list = dma_pool_alloc(nvme_dma_pool(dev, iod), GFP_ATOMIC, &sgl_dma);
> if (!sg_list)
> @@ -932,7 +987,7 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
>
> iod->aborted = false;
> iod->nr_descriptors = 0;
> - iod->large_descriptors = false;
> + iod->flags = 0;
> iod->total_len = 0;
> iod->total_meta_len = 0;
>
--
Damien Le Moal
Western Digital Research