Message-Id: <20260112143810.2046599-1-nj.shetty@samsung.com>
Date: Mon, 12 Jan 2026 20:08:08 +0530
From: Nitesh Shetty <nj.shetty@...sung.com>
To: Jens Axboe <axboe@...nel.dk>, Keith Busch <kbusch@...nel.org>, Christoph
Hellwig <hch@....de>, Sagi Grimberg <sagi@...mberg.me>
Cc: nitheshshetty@...il.com, Nitesh Shetty <nj.shetty@...sung.com>,
linux-block@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-nvme@...ts.infradead.org
Subject: [PATCH] block, nvme: remove unused dma_iova_state function
parameter

The DMA IOVA state argument is not used inside blk_rq_dma_map_iter_next(),
so drop it from the function signature and adjust the callers accordingly.

Signed-off-by: Nitesh Shetty <nj.shetty@...sung.com>
---
block/blk-mq-dma.c | 3 +--
drivers/nvme/host/pci.c | 5 ++---
include/linux/blk-mq-dma.h | 2 +-
3 files changed, 4 insertions(+), 6 deletions(-)
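
For reference, after this change only blk_rq_dma_map_iter_start() still takes
the IOVA state; a rough sketch of the resulting caller pattern is below (the
names "dev" and "handle_segment" are illustrative only, not taken from this
patch):

	struct dma_iova_state state;
	struct blk_dma_iter iter;

	if (!blk_rq_dma_map_iter_start(req, dev, &state, &iter))
		return iter.status;	/* nothing mapped; error, if any, is in iter.status */

	do {
		/* consume one mapped DMA segment */
		handle_segment(iter.addr, iter.len);
	} while (blk_rq_dma_map_iter_next(req, dev, &iter));
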
diff --git a/block/blk-mq-dma.c b/block/blk-mq-dma.c
index 752060d7261cb..3c87779cdc19d 100644
--- a/block/blk-mq-dma.c
+++ b/block/blk-mq-dma.c
@@ -233,7 +233,6 @@ EXPORT_SYMBOL_GPL(blk_rq_dma_map_iter_start);
* blk_rq_dma_map_iter_next - map the next DMA segment for a request
* @req: request to map
* @dma_dev: device to map to
- * @state: DMA IOVA state
* @iter: block layer DMA iterator
*
* Iterate to the next mapping after a previous call to
@@ -248,7 +247,7 @@ EXPORT_SYMBOL_GPL(blk_rq_dma_map_iter_start);
* returned in @iter.status.
*/
bool blk_rq_dma_map_iter_next(struct request *req, struct device *dma_dev,
- struct dma_iova_state *state, struct blk_dma_iter *iter)
+ struct blk_dma_iter *iter)
{
struct phys_vec vec;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 3b528369f5454..065555576d2f9 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -823,7 +823,7 @@ static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
if (iter->len)
return true;
- if (!blk_rq_dma_map_iter_next(req, dma_dev, &iod->dma_state, iter))
+ if (!blk_rq_dma_map_iter_next(req, dma_dev, iter))
return false;
if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev)) {
iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
@@ -1010,8 +1010,7 @@ static blk_status_t nvme_pci_setup_data_sgl(struct request *req,
}
nvme_pci_sgl_set_data(&sg_list[mapped++], iter);
iod->total_len += iter->len;
- } while (blk_rq_dma_map_iter_next(req, nvmeq->dev->dev, &iod->dma_state,
- iter));
+ } while (blk_rq_dma_map_iter_next(req, nvmeq->dev->dev, iter));
nvme_pci_sgl_set_seg(&iod->cmd.common.dptr.sgl, sgl_dma, mapped);
if (unlikely(iter->status))
diff --git a/include/linux/blk-mq-dma.h b/include/linux/blk-mq-dma.h
index cb88fc791fbd1..214c181ff2c9c 100644
--- a/include/linux/blk-mq-dma.h
+++ b/include/linux/blk-mq-dma.h
@@ -28,7 +28,7 @@ struct blk_dma_iter {
bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
struct dma_iova_state *state, struct blk_dma_iter *iter);
bool blk_rq_dma_map_iter_next(struct request *req, struct device *dma_dev,
- struct dma_iova_state *state, struct blk_dma_iter *iter);
+ struct blk_dma_iter *iter);
/**
* blk_rq_dma_map_coalesce - were all segments coalesced?
--
2.39.5