Message-Id: <20220405070406.975651673@linuxfoundation.org>
Date: Tue, 5 Apr 2022 09:32:48 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, David Stevens <stevensd@...omium.org>,
Christoph Hellwig <hch@....de>,
Robin Murphy <robin.murphy@....com>,
Joerg Roedel <jroedel@...e.de>,
Mario Limonciello <Mario.Limonciello@....com>
Subject: [PATCH 5.15 906/913] iommu/dma: Fold _swiotlb helpers into callers

From: David Stevens <stevensd@...omium.org>

commit 9b49bbc2c4dfd0431bf7ff4e862171189cf94b7e upstream.

Fold the _swiotlb helper functions into the respective _page functions,
since recent fixes have moved all logic from the _page functions to the
_swiotlb functions.

Signed-off-by: David Stevens <stevensd@...omium.org>
Reviewed-by: Christoph Hellwig <hch@....de>
Reviewed-by: Robin Murphy <robin.murphy@....com>
Link: https://lore.kernel.org/r/20210929023300.335969-5-stevensd@google.com
Signed-off-by: Joerg Roedel <jroedel@...e.de>
Cc: Mario Limonciello <Mario.Limonciello@....com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
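
For orientation before reading the diff: the fold removes one level of
indirection in the DMA mapping paths. A schematic of the call chains
before and after, as implied by the hunks below:

  before:
    iommu_dma_map_page()         -> __iommu_dma_map_swiotlb()   -> __iommu_dma_map()
    iommu_dma_unmap_page()       -> __iommu_dma_unmap_swiotlb() -> __iommu_dma_unmap()
    iommu_dma_map_sg_swiotlb()   -> __iommu_dma_map_swiotlb()   -> __iommu_dma_map()
    iommu_dma_unmap_sg_swiotlb() -> __iommu_dma_unmap_swiotlb() -> __iommu_dma_unmap()

  after:
    iommu_dma_map_page()         -> __iommu_dma_map()
    iommu_dma_unmap_page()       -> __iommu_dma_unmap()
    iommu_dma_map_sg_swiotlb()   -> iommu_dma_map_page()
    iommu_dma_unmap_sg_swiotlb() -> iommu_dma_unmap_page()

The swiotlb bounce-buffer handling previously in the _swiotlb helpers now
lives directly in iommu_dma_map_page() and iommu_dma_unmap_page().
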
drivers/iommu/dma-iommu.c | 135 ++++++++++++++++++++--------------------------
1 file changed, 59 insertions(+), 76 deletions(-)

--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -510,26 +510,6 @@ static void __iommu_dma_unmap(struct dev
iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
}

-static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct iommu_domain *domain = iommu_get_dma_domain(dev);
- phys_addr_t phys;
-
- phys = iommu_iova_to_phys(domain, dma_addr);
- if (WARN_ON(!phys))
- return;
-
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
- arch_sync_dma_for_cpu(phys, size, dir);
-
- __iommu_dma_unmap(dev, dma_addr, size);
-
- if (unlikely(is_swiotlb_buffer(dev, phys)))
- swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
-}
-
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
size_t size, int prot, u64 dma_mask)
{
@@ -556,55 +536,6 @@ static dma_addr_t __iommu_dma_map(struct
return iova + iova_off;
}

-static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
- size_t org_size, dma_addr_t dma_mask, bool coherent,
- enum dma_data_direction dir, unsigned long attrs)
-{
- int prot = dma_info_to_prot(dir, coherent, attrs);
- struct iommu_domain *domain = iommu_get_dma_domain(dev);
- struct iommu_dma_cookie *cookie = domain->iova_cookie;
- struct iova_domain *iovad = &cookie->iovad;
- size_t aligned_size = org_size;
- void *padding_start;
- size_t padding_size;
- dma_addr_t iova;
-
- /*
- * If both the physical buffer start address and size are
- * page aligned, we don't need to use a bounce page.
- */
- if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
- iova_offset(iovad, phys | org_size)) {
- aligned_size = iova_align(iovad, org_size);
- phys = swiotlb_tbl_map_single(dev, phys, org_size,
- aligned_size, dir, attrs);
-
- if (phys == DMA_MAPPING_ERROR)
- return DMA_MAPPING_ERROR;
-
- /* Cleanup the padding area. */
- padding_start = phys_to_virt(phys);
- padding_size = aligned_size;
-
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- (dir == DMA_TO_DEVICE ||
- dir == DMA_BIDIRECTIONAL)) {
- padding_start += org_size;
- padding_size -= org_size;
- }
-
- memset(padding_start, 0, padding_size);
- }
-
- if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- arch_sync_dma_for_device(phys, org_size, dir);
-
- iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
- if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
- swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs);
- return iova;
-}
-
static void __iommu_dma_free_pages(struct page **pages, int count)
{
while (count--)
@@ -866,15 +797,68 @@ static dma_addr_t iommu_dma_map_page(str
{
phys_addr_t phys = page_to_phys(page) + offset;
bool coherent = dev_is_dma_coherent(dev);
+ int prot = dma_info_to_prot(dir, coherent, attrs);
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ size_t aligned_size = size;
+ dma_addr_t iova, dma_mask = dma_get_mask(dev);
+
+ /*
+ * If both the physical buffer start address and size are
+ * page aligned, we don't need to use a bounce page.
+ */
+ if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
+ iova_offset(iovad, phys | size)) {
+ void *padding_start;
+ size_t padding_size;
+
+ aligned_size = iova_align(iovad, size);
+ phys = swiotlb_tbl_map_single(dev, phys, size,
+ aligned_size, dir, attrs);
+
+ if (phys == DMA_MAPPING_ERROR)
+ return DMA_MAPPING_ERROR;

- return __iommu_dma_map_swiotlb(dev, phys, size, dma_get_mask(dev),
- coherent, dir, attrs);
+ /* Cleanup the padding area. */
+ padding_start = phys_to_virt(phys);
+ padding_size = aligned_size;
+
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+ (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) {
+ padding_start += size;
+ padding_size -= size;
+ }
+
+ memset(padding_start, 0, padding_size);
+ }
+
+ if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_device(phys, size, dir);
+
+ iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
+ if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
+ swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
+ return iova;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- __iommu_dma_unmap_swiotlb(dev, dma_handle, size, dir, attrs);
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ phys_addr_t phys;
+
+ phys = iommu_iova_to_phys(domain, dma_handle);
+ if (WARN_ON(!phys))
+ return;
+
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
+ arch_sync_dma_for_cpu(phys, size, dir);
+
+ __iommu_dma_unmap(dev, dma_handle, size);
+
+ if (unlikely(is_swiotlb_buffer(dev, phys)))
+ swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}

/*
@@ -959,7 +943,7 @@ static void iommu_dma_unmap_sg_swiotlb(s
int i;

for_each_sg(sg, s, nents, i)
- __iommu_dma_unmap_swiotlb(dev, sg_dma_address(s),
+ iommu_dma_unmap_page(dev, sg_dma_address(s),
sg_dma_len(s), dir, attrs);
}

@@ -970,9 +954,8 @@ static int iommu_dma_map_sg_swiotlb(stru
int i;

for_each_sg(sg, s, nents, i) {
- sg_dma_address(s) = __iommu_dma_map_swiotlb(dev, sg_phys(s),
- s->length, dma_get_mask(dev),
- dev_is_dma_coherent(dev), dir, attrs);
+ sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
+ s->offset, s->length, dir, attrs);
if (sg_dma_address(s) == DMA_MAPPING_ERROR)
goto out_unmap;
sg_dma_len(s) = s->length;
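
A note on the bounce-buffer condition retained by the fold:
iova_offset(iovad, phys | size) decides in a single test whether a bounce
page is needed, because OR-ing the buffer's start address and size and then
masking with the IOVA granule is nonzero iff either value is misaligned.
A minimal standalone sketch of the trick (plain C, not kernel code; the
4 KiB granule is an assumption, the kernel reads it from iovad->granule):

  #include <stdio.h>

  #define GRANULE 4096UL  /* assumed granule size; real value: iovad->granule */

  /* Mimics iova_offset(): the bits of val below the granule boundary. */
  static unsigned long iova_offset_demo(unsigned long val)
  {
          return val & (GRANULE - 1);
  }

  int main(void)
  {
          /* Aligned start and aligned size: offset 0, no bounce page needed. */
          printf("%lu\n", iova_offset_demo(0x2000UL | 0x1000UL));  /* 0 */

          /* Misaligned size: the single OR-ed test catches it. */
          printf("%lu\n", iova_offset_demo(0x2000UL | 0x800UL));   /* 2048 */

          /* Misaligned start address: caught the same way. */
          printf("%lu\n", iova_offset_demo(0x2100UL | 0x1000UL));  /* 256 */
          return 0;
  }

When the test fires for an untrusted device, the buffer is bounced through
swiotlb and the padding of the granule-aligned slot is zeroed, so the device
never sees stale kernel memory beyond the caller's buffer.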