Message-Id: <20210813073839.1562438-5-stevensd@google.com>
Date: Fri, 13 Aug 2021 16:38:37 +0900
From: David Stevens <stevensd@...omium.org>
To: Robin Murphy <robin.murphy@....com>, Christoph Hellwig <hch@....de>
Cc: Joerg Roedel <joro@...tes.org>, Will Deacon <will@...nel.org>,
Lu Baolu <baolu.lu@...ux.intel.com>,
Tom Murphy <murphyt7@....ie>, iommu@...ts.linux-foundation.org,
linux-kernel@...r.kernel.org, David Stevens <stevensd@...omium.org>
Subject: [PATCH v4 4/6] dma-iommu: Check CONFIG_SWIOTLB more broadly
From: David Stevens <stevensd@...omium.org>
Introduce a new dev_use_swiotlb function to guard swiotlb code, instead
of overloading dev_is_untrusted. This allows CONFIG_SWIOTLB to be
checked more broadly, so the swiotlb-related code can be compiled out
more aggressively when CONFIG_SWIOTLB is disabled.
Signed-off-by: David Stevens <stevensd@...omium.org>
Reviewed-by: Robin Murphy <robin.murphy@....com>
---
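As an aside, here is a minimal userspace sketch of the IS_ENABLED()
pattern this relies on. CONFIG_DEMO_FEATURE, IS_ENABLED_DEMO() and
dev_use_feature() are made-up stand-ins, and the kernel's real
IS_ENABLED() macro uses preprocessor tricks rather than this plain
expansion; the point is only that when the config symbol is
compile-time false, the helper constant-folds to false and the
compiler can discard every branch it guards:

#include <stdbool.h>
#include <stdio.h>

#define CONFIG_DEMO_FEATURE 0			/* stand-in for CONFIG_SWIOTLB=n */
#define IS_ENABLED_DEMO(option) (option)	/* simplified; not the kernel macro */

static bool dev_is_untrusted_demo(void)
{
	return true;
}

static bool dev_use_feature(void)
{
	/* Constant-folds to false when CONFIG_DEMO_FEATURE is 0. */
	return IS_ENABLED_DEMO(CONFIG_DEMO_FEATURE) && dev_is_untrusted_demo();
}

int main(void)
{
	if (dev_use_feature())
		puts("bounce-buffer path");	/* dead code when the option is off */
	else
		puts("direct path");
	return 0;
}

With the option off, the compiler drops the guarded branch entirely,
which is why routing every check through one IS_ENABLED()-guarded
helper removes the code without #ifdefs.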
drivers/iommu/dma-iommu.c | 19 ++++++++++++-------
1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index f7da4934f7e6..bad813d63ea6 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -317,6 +317,11 @@ static bool dev_is_untrusted(struct device *dev)
 	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
 }
 
+static bool dev_use_swiotlb(struct device *dev)
+{
+	return IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev);
+}
+
 /**
  * iommu_dma_init_domain - Initialise a DMA mapping domain
  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
@@ -713,7 +718,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
 {
 	phys_addr_t phys;
 
-	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
+	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
 		return;
 
 	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
@@ -729,7 +734,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
 {
 	phys_addr_t phys;
 
-	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
+	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
		return;
 
 	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
@@ -747,7 +752,7 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
 	struct scatterlist *sg;
 	int i;
 
-	if (dev_is_untrusted(dev))
+	if (dev_use_swiotlb(dev))
 		for_each_sg(sgl, sg, nelems, i)
 			iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
 						      sg->length, dir);
@@ -763,7 +768,7 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
 	struct scatterlist *sg;
 	int i;
 
-	if (dev_is_untrusted(dev))
+	if (dev_use_swiotlb(dev))
 		for_each_sg(sgl, sg, nelems, i)
 			iommu_dma_sync_single_for_device(dev,
 							 sg_dma_address(sg),
@@ -790,7 +795,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 	 * If both the physical buffer start address and size are
 	 * page aligned, we don't need to use a bounce page.
 	 */
-	if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
+	if (dev_use_swiotlb(dev) &&
 	    iova_offset(iovad, phys | size)) {
 		void *padding_start;
 		size_t padding_size;
@@ -975,7 +980,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	    iommu_deferred_attach(dev, domain))
 		return 0;
 
-	if (dev_is_untrusted(dev))
+	if (dev_use_swiotlb(dev))
 		return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);
 
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
@@ -1047,7 +1052,7 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 	struct scatterlist *tmp;
 	int i;
 
-	if (dev_is_untrusted(dev)) {
+	if (dev_use_swiotlb(dev)) {
 		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
 		return;
 	}
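
As a footnote on the iommu_dma_map_page hunk above: OR-ing the start
address and the length means a single iova_offset() call catches
misalignment of either one. A rough sketch of that check, with
iovad_demo and iova_offset_demo as made-up stand-ins for the real
iova domain helpers:

#include <stdbool.h>
#include <stddef.h>

/* Made-up stand-in for struct iova_domain; only the granule matters. */
struct iovad_demo {
	size_t granule;		/* IOVA granule size, a power of two */
};

static size_t iova_offset_demo(const struct iovad_demo *iovad, size_t x)
{
	return x & (iovad->granule - 1);	/* offset within one granule */
}

/*
 * phys | size has a nonzero in-granule offset exactly when either the
 * start address or the length is misaligned, so one check covers both;
 * only then is a bounce page needed.
 */
static bool needs_bounce(const struct iovad_demo *iovad, size_t phys, size_t size)
{
	return iova_offset_demo(iovad, phys | size) != 0;
}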
--
2.33.0.rc1.237.g0d66db33f3-goog