Message-ID: <e3a8350baeaad544010c65dc62db53cf92ff2be1.1726138681.git.leon@kernel.org>
Date: Thu, 12 Sep 2024 14:15:37 +0300
From: Leon Romanovsky <leon@...nel.org>
To: Jens Axboe <axboe@...nel.dk>,
Jason Gunthorpe <jgg@...pe.ca>,
Robin Murphy <robin.murphy@....com>,
Joerg Roedel <joro@...tes.org>,
Will Deacon <will@...nel.org>,
Keith Busch <kbusch@...nel.org>,
Christoph Hellwig <hch@....de>,
"Zeng, Oak" <oak.zeng@...el.com>,
Chaitanya Kulkarni <kch@...dia.com>
Cc: Leon Romanovsky <leonro@...dia.com>,
Sagi Grimberg <sagi@...mberg.me>,
Bjorn Helgaas <bhelgaas@...gle.com>,
Logan Gunthorpe <logang@...tatee.com>,
Yishai Hadas <yishaih@...dia.com>,
Shameer Kolothum <shameerali.kolothum.thodi@...wei.com>,
Kevin Tian <kevin.tian@...el.com>,
Alex Williamson <alex.williamson@...hat.com>,
Marek Szyprowski <m.szyprowski@...sung.com>,
Jérôme Glisse <jglisse@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>,
linux-block@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-rdma@...r.kernel.org,
iommu@...ts.linux.dev,
linux-nvme@...ts.infradead.org,
linux-pci@...r.kernel.org,
kvm@...r.kernel.org,
linux-mm@...ck.org
Subject: [RFC v2 02/21] iommu/dma: Implement link/unlink ranges callbacks
From: Leon Romanovsky <leonro@...dia.com>
Add an implementation of the link/unlink interface to map/unmap
pages on the fast path against a pre-allocated IOVA.
Signed-off-by: Leon Romanovsky <leonro@...dia.com>
---
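A rough sketch of the intended calling sequence, for reviewers. The
demo_map() caller below is hypothetical and only illustrates how the new
calls are meant to compose; iommu_dma_alloc_iova() and the dma_iova_state
fields (addr, range_size, dir) come from the previous patch in this
series, and the error-handling convention assumes link failures report
DMA_MAPPING_ERROR, matching the !CONFIG_IOMMU_DMA stub:

/*
 * Hypothetical caller, illustration only: link several physically
 * contiguous chunks into one IOVA range that was pre-allocated with
 * iommu_dma_alloc_iova() (patch 01). Everything named demo_* is made
 * up for this example.
 */
static dma_addr_t demo_map(struct device *dev, struct dma_iova_state *state,
                           phys_addr_t *phys, size_t *len, int nr)
{
        dma_addr_t first = DMA_MAPPING_ERROR;
        dma_addr_t addr;
        int i;

        /* Handles deferred attach before any linking is done */
        if (iommu_dma_start_range(dev))
                return DMA_MAPPING_ERROR;

        for (i = 0; i < nr; i++) {
                addr = iommu_dma_link_range(state, phys[i], len[i], 0);
                if (addr == DMA_MAPPING_ERROR) {
                        /* Unlink everything linked so far in one go */
                        if (state->range_size)
                                iommu_dma_unlink_range(dev, state->addr,
                                                       state->range_size,
                                                       state->dir, 0);
                        first = DMA_MAPPING_ERROR;
                        break;
                }
                if (!i)
                        first = addr;
        }

        /* No-op today; will batch the iotlb sync for the range (see TODO) */
        iommu_dma_end_range(dev);
        return first;
}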
drivers/iommu/dma-iommu.c | 86 +++++++++++++++++++++++++++++++++++++++
include/linux/iommu-dma.h | 25 ++++++++++++
2 files changed, 111 insertions(+)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 09deea2fc86b..72763f76b712 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -1743,6 +1743,92 @@ void iommu_dma_free_iova(struct dma_iova_state *state)
&iotlb_gather);
}
+int iommu_dma_start_range(struct device *dev)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+
+ if (static_branch_unlikely(&iommu_deferred_attach_enabled))
+ return iommu_deferred_attach(dev, domain);
+
+ return 0;
+}
+
+void iommu_dma_end_range(struct device *dev)
+{
+ /* TODO: Factor out ops->iotlb_sync_map(..) call from iommu_map()
+ * and put it here to provide batched iotlb sync for the range.
+ */
+}
+
+dma_addr_t iommu_dma_link_range(struct dma_iova_state *state, phys_addr_t phys,
+ size_t size, unsigned long attrs)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(state->dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ size_t iova_off = iova_offset(iovad, phys);
+ bool coherent = dev_is_dma_coherent(state->dev);
+ int prot = dma_info_to_prot(state->dir, coherent, attrs);
+ dma_addr_t addr = state->addr + state->range_size;
+ int ret;
+
+ WARN_ON_ONCE(iova_off && state->range_size > 0);
+
+ if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_device(phys, size, state->dir);
+
+ size = iova_align(iovad, size + iova_off);
+ ret = iommu_map(domain, addr, phys - iova_off, size, prot, GFP_ATOMIC);
+ if (ret)
+ return DMA_MAPPING_ERROR;
+
+ state->range_size += size;
+ return addr + iova_off;
+}
+
+static void iommu_sync_dma_for_cpu(struct iommu_domain *domain,
+ dma_addr_t start, size_t size,
+ enum dma_data_direction dir)
+{
+ size_t sync_size, unmapped = 0;
+ phys_addr_t phys;
+
+ do {
+ phys = iommu_iova_to_phys(domain, start + unmapped);
+ if (WARN_ON(!phys))
+ break;
+
+ sync_size = (unmapped + PAGE_SIZE > size) ? size % PAGE_SIZE :
+ PAGE_SIZE;
+ arch_sync_dma_for_cpu(phys, sync_size, dir);
+ unmapped += sync_size;
+ } while (unmapped < size);
+}
+
+void iommu_dma_unlink_range(struct device *dev, dma_addr_t start, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ struct iommu_iotlb_gather iotlb_gather;
+ bool coherent = dev_is_dma_coherent(dev);
+ size_t unmapped;
+
+ iommu_iotlb_gather_init(&iotlb_gather);
+ iotlb_gather.queued = READ_ONCE(cookie->fq_domain);
+
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !coherent)
+ iommu_sync_dma_for_cpu(domain, start, size, dir);
+
+ size = iova_align(iovad, size);
+ unmapped = iommu_unmap_fast(domain, start, size, &iotlb_gather);
+ WARN_ON(unmapped != size);
+
+ if (!iotlb_gather.queued)
+ iommu_iotlb_sync(domain, &iotlb_gather);
+}
+
void iommu_setup_dma_ops(struct device *dev)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
diff --git a/include/linux/iommu-dma.h b/include/linux/iommu-dma.h
index 698df67b152a..21b0341f52b8 100644
--- a/include/linux/iommu-dma.h
+++ b/include/linux/iommu-dma.h
@@ -60,6 +60,12 @@ void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
int iommu_dma_alloc_iova(struct dma_iova_state *state, phys_addr_t phys,
size_t size);
void iommu_dma_free_iova(struct dma_iova_state *state);
+int iommu_dma_start_range(struct device *dev);
+void iommu_dma_end_range(struct device *dev);
+dma_addr_t iommu_dma_link_range(struct dma_iova_state *state, phys_addr_t phys,
+ size_t size, unsigned long attrs);
+void iommu_dma_unlink_range(struct device *dev, dma_addr_t start, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
#else
static inline bool use_dma_iommu(struct device *dev)
{
@@ -184,5 +190,24 @@ static inline int iommu_dma_alloc_iova(struct dma_iova_state *state,
static inline void iommu_dma_free_iova(struct dma_iova_state *state)
{
}
+static inline int iommu_dma_start_range(struct device *dev)
+{
+ return -EOPNOTSUPP;
+}
+static inline void iommu_dma_end_range(struct device *dev)
+{
+}
+static inline dma_addr_t iommu_dma_link_range(struct dma_iova_state *state,
+ phys_addr_t phys, size_t size,
+ unsigned long attrs)
+{
+ return DMA_MAPPING_ERROR;
+}
+static inline void iommu_dma_unlink_range(struct device *dev, dma_addr_t start,
+ size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+}
#endif /* CONFIG_IOMMU_DMA */
#endif /* _LINUX_IOMMU_DMA_H */
--
2.46.0