Message-ID: <04baf1fdff8a04197d5f64c2653c29e7482a2840.1760369219.git.leon@kernel.org>
Date: Mon, 13 Oct 2025 18:34:10 +0300
From: Leon Romanovsky <leon@...nel.org>
To: Christoph Hellwig <hch@....de>,
Jens Axboe <axboe@...nel.dk>
Cc: Leon Romanovsky <leonro@...dia.com>,
Jason Gunthorpe <jgg@...dia.com>,
Keith Busch <kbusch@...nel.org>,
linux-block@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-nvme@...ts.infradead.org,
Sagi Grimberg <sagi@...mberg.me>
Subject: [PATCH 2/4] blk-mq-dma: unify DMA unmap routine
From: Leon Romanovsky <leonro@...dia.com>
Combine the regular and metadata integrity DMA unmapping flows into a
single blk_rq_dma_unmap(). This allows us to handle the addition of the
new DMA_ATTR_MMIO flow without adding extra function parameters.
Signed-off-by: Leon Romanovsky <leonro@...dia.com>
---
block/blk-mq-dma.c | 29 +++++++++++++++++++++++++++++
include/linux/blk-integrity.h | 3 +--
include/linux/blk-mq-dma.h | 35 ++---------------------------------
3 files changed, 32 insertions(+), 35 deletions(-)
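
Not part of the patch, just for review context: a minimal caller-side
sketch of how the unified helper is expected to be used in a driver's
completion path. The my_iod structure, MY_MAX_SEGS and my_unmap_data()
are hypothetical illustration only; a real driver records its own
segments at map time.

#include <linux/blk-mq.h>
#include <linux/blk-mq-dma.h>
#include <linux/dma-mapping.h>

#define MY_MAX_SEGS	128	/* hypothetical per-request segment limit */

struct my_iod {
	struct dma_iova_state state;	/* set up at map time */
	size_t mapped_len;		/* total bytes mapped */
	unsigned int nr_segs;		/* segments recorded at map time */
	dma_addr_t seg_addr[MY_MAX_SEGS];
	unsigned int seg_len[MY_MAX_SEGS];
};

static void my_unmap_data(struct device *dma_dev, struct request *req,
			  struct my_iod *iod)
{
	unsigned int i;

	/*
	 * blk_rq_dma_unmap() now serves both the data and the metadata
	 * integrity paths: it returns true when the P2P case, the IOVA
	 * case, or a device that needs no unmap leaves nothing to do.
	 */
	if (blk_rq_dma_unmap(req, dma_dev, &iod->state, iod->mapped_len))
		return;

	/* Otherwise unmap every segment the driver mapped itself. */
	for (i = 0; i < iod->nr_segs; i++)
		dma_unmap_page(dma_dev, iod->seg_addr[i], iod->seg_len[i],
			       rq_dma_dir(req));
}
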
diff --git a/block/blk-mq-dma.c b/block/blk-mq-dma.c
index 4ba7b0323da4..0648bcb705ff 100644
--- a/block/blk-mq-dma.c
+++ b/block/blk-mq-dma.c
@@ -260,6 +260,35 @@ bool blk_rq_dma_map_iter_next(struct request *req, struct device *dma_dev,
}
EXPORT_SYMBOL_GPL(blk_rq_dma_map_iter_next);
+/**
+ * blk_rq_dma_unmap - try to DMA unmap a request
+ * @req: request to unmap
+ * @dma_dev: device to unmap from
+ * @state: DMA IOVA state
+ * @mapped_len: number of bytes to unmap
+ *
+ * Returns %false if the callers need to manually unmap every DMA segment
+ * mapped using @iter or %true if no work is left to be done.
+ */
+bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev,
+ struct dma_iova_state *state, size_t mapped_len)
+{
+ struct bio_integrity_payload *bip = bio_integrity(req->bio);
+
+ if ((!bip && req->cmd_flags & REQ_P2PDMA) ||
+ bio_integrity_flagged(req->bio, BIP_P2P_DMA))
+ return true;
+
+ if (dma_use_iova(state)) {
+ dma_iova_destroy(dma_dev, state, mapped_len, rq_dma_dir(req),
+ 0);
+ return true;
+ }
+
+ return !dma_need_unmap(dma_dev);
+}
+EXPORT_SYMBOL(blk_rq_dma_unmap);
+
static inline struct scatterlist *
blk_next_sg(struct scatterlist **sg, struct scatterlist *sglist)
{
diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h
index b659373788f6..4a0e65f00bd6 100644
--- a/include/linux/blk-integrity.h
+++ b/include/linux/blk-integrity.h
@@ -32,8 +32,7 @@ static inline bool blk_rq_integrity_dma_unmap(struct request *req,
struct device *dma_dev, struct dma_iova_state *state,
size_t mapped_len)
{
- return blk_dma_unmap(req, dma_dev, state, mapped_len,
- bio_integrity(req->bio)->bip_flags & BIP_P2P_DMA);
+ return blk_rq_dma_unmap(req, dma_dev, state, mapped_len);
}
int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
diff --git a/include/linux/blk-mq-dma.h b/include/linux/blk-mq-dma.h
index 51829958d872..e93e167ec551 100644
--- a/include/linux/blk-mq-dma.h
+++ b/include/linux/blk-mq-dma.h
@@ -29,6 +29,8 @@ bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
struct dma_iova_state *state, struct blk_dma_iter *iter);
bool blk_rq_dma_map_iter_next(struct request *req, struct device *dma_dev,
struct dma_iova_state *state, struct blk_dma_iter *iter);
+bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev,
+ struct dma_iova_state *state, size_t mapped_len);
/**
* blk_rq_dma_map_coalesce - were all segments coalesced?
@@ -42,37 +44,4 @@ static inline bool blk_rq_dma_map_coalesce(struct dma_iova_state *state)
return dma_use_iova(state);
}
-/**
- * blk_dma_unmap - try to DMA unmap a request
- * @req: request to unmap
- * @dma_dev: device to unmap from
- * @state: DMA IOVA state
- * @mapped_len: number of bytes to unmap
- * @is_p2p: true if mapped with PCI_P2PDMA_MAP_BUS_ADDR
- *
- * Returns %false if the callers need to manually unmap every DMA segment
- * mapped using @iter or %true if no work is left to be done.
- */
-static inline bool blk_dma_unmap(struct request *req, struct device *dma_dev,
- struct dma_iova_state *state, size_t mapped_len, bool is_p2p)
-{
- if (is_p2p)
- return true;
-
- if (dma_use_iova(state)) {
- dma_iova_destroy(dma_dev, state, mapped_len, rq_dma_dir(req),
- 0);
- return true;
- }
-
- return !dma_need_unmap(dma_dev);
-}
-
-static inline bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev,
- struct dma_iova_state *state, size_t mapped_len)
-{
- return blk_dma_unmap(req, dma_dev, state, mapped_len,
- req->cmd_flags & REQ_P2PDMA);
-}
-
#endif /* BLK_MQ_DMA_H */
--
2.51.0