Message-Id: <bcab0427a3e827f4a4c033fbdf61239c8449773c.1597931876.git.robin.murphy@arm.com>
Date: Thu, 20 Aug 2020 16:08:22 +0100
From: Robin Murphy <robin.murphy@....com>
To: hch@....de, joro@...tes.org, linux@...linux.org.uk
Cc: will@...nel.org, inki.dae@...sung.com, sw0312.kim@...sung.com,
kyungmin.park@...sung.com, m.szyprowski@...sung.com,
agross@...nel.org, bjorn.andersson@...aro.org,
thierry.reding@...il.com, jonathanh@...dia.com, vdumpa@...dia.com,
digetx@...il.com, matthias.bgg@...il.com, yong.wu@...iatek.com,
geert+renesas@...der.be, magnus.damm@...il.com, t-kristo@...com,
s-anna@...com, laurent.pinchart@...asonboard.com,
linux-arm-kernel@...ts.infradead.org,
iommu@...ts.linux-foundation.org,
linux-samsung-soc@...r.kernel.org, linux-tegra@...r.kernel.org,
linux-arm-msm@...r.kernel.org, linux-mediatek@...ts.infradead.org,
dri-devel@...ts.freedesktop.org, linux-media@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH 03/18] ARM/dma-mapping: Merge IOMMU ops

The dma_sync_* operations are now the only difference between the
coherent and non-coherent IOMMU ops. After some minor tweaks to make
those safe for coherent devices with minimal overhead, we can condense
down to a single set of DMA ops.

Signed-off-by: Robin Murphy <robin.murphy@....com>
---
arch/arm/mm/dma-mapping.c | 41 +++++++++++++--------------------------
1 file changed, 13 insertions(+), 28 deletions(-)
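
As a rough illustration of the approach (not part of the patch), the same
early-return trick can be modelled in ordinary userspace C. Everything in
the sketch below is invented for illustration: struct fake_device,
fake_sync_for_device() and cache_clean() merely stand in for the real
dev->dma_coherent flag, the arm_iommu_sync_* callbacks and
__dma_page_cpu_to_dev().

#include <stddef.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_device {
	const char *name;
	bool dma_coherent;	/* models dev->dma_coherent */
};

/* Stand-in for __dma_page_cpu_to_dev(): pretend to clean the CPU cache. */
static void cache_clean(const struct fake_device *dev, void *buf, size_t len)
{
	printf("%s: cleaning %zu bytes at %p\n", dev->name, len, buf);
}

/*
 * One sync callback serves both kinds of device: coherent devices take
 * the early return, non-coherent devices still get cache maintenance.
 */
static void fake_sync_for_device(const struct fake_device *dev, void *buf,
				 size_t len)
{
	if (dev->dma_coherent)
		return;

	cache_clean(dev, buf, len);
}

int main(void)
{
	char buf[64];
	struct fake_device noncoherent = { "noncoherent0", false };
	struct fake_device coherent = { "coherent0", true };

	fake_sync_for_device(&noncoherent, buf, sizeof(buf));	/* cleans */
	fake_sync_for_device(&coherent, buf, sizeof(buf));	/* no-op */
	return 0;
}

A coherent device pays only for one predictable, untaken branch per sync
call, which is the minimal overhead the commit message refers to.
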
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1bb7e9608f75..0537c97cebe1 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1677,6 +1677,9 @@ static void arm_iommu_sync_sg_for_cpu(struct device *dev,
 	struct scatterlist *s;
 	int i;
 
+	if (dev->dma_coherent)
+		return;
+
 	for_each_sg(sg, s, nents, i)
 		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
 }
@@ -1696,6 +1699,9 @@ static void arm_iommu_sync_sg_for_device(struct device *dev,
 	struct scatterlist *s;
 	int i;
 
+	if (dev->dma_coherent)
+		return;
+
 	for_each_sg(sg, s, nents, i)
 		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 }
@@ -1829,12 +1835,13 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+	struct page *page;
 	unsigned int offset = handle & ~PAGE_MASK;
 
-	if (!iova)
+	if (dev->dma_coherent || !iova)
 		return;
 
+	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
 	__dma_page_dev_to_cpu(page, offset, size, dir);
 }
@@ -1843,12 +1850,13 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+	struct page *page;
 	unsigned int offset = handle & ~PAGE_MASK;
 
-	if (!iova)
+	if (dev->dma_coherent || !iova)
 		return;
 
+	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 }
@@ -1872,22 +1880,6 @@ static const struct dma_map_ops iommu_ops = {
 	.unmap_resource = arm_iommu_unmap_resource,
 };
 
-static const struct dma_map_ops iommu_coherent_ops = {
-	.alloc = arm_iommu_alloc_attrs,
-	.free = arm_iommu_free_attrs,
-	.mmap = arm_iommu_mmap_attrs,
-	.get_sgtable = arm_iommu_get_sgtable,
-
-	.map_page = arm_iommu_map_page,
-	.unmap_page = arm_iommu_unmap_page,
-
-	.map_sg = arm_iommu_map_sg,
-	.unmap_sg = arm_iommu_unmap_sg,
-
-	.map_resource = arm_iommu_map_resource,
-	.unmap_resource = arm_iommu_unmap_resource,
-};
-
 /**
  * arm_iommu_create_mapping
  * @bus: pointer to the bus holding the client device (for IOMMU calls)
@@ -2067,11 +2059,6 @@ void arm_iommu_detach_device(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
 
-static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
-{
-	return coherent ? &iommu_coherent_ops : &iommu_ops;
-}
-
 static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
 				    const struct iommu_ops *iommu)
 {
@@ -2118,8 +2105,6 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
 static void arm_teardown_iommu_dma_ops(struct device *dev) { }
 
-#define arm_get_iommu_dma_map_ops arm_get_dma_map_ops
-
 #endif /* CONFIG_ARM_DMA_USE_IOMMU */
 
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
@@ -2141,7 +2126,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		return;
 
 	if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
-		dma_ops = arm_get_iommu_dma_map_ops(coherent);
+		dma_ops = &iommu_ops;
 	else
 		dma_ops = arm_get_dma_map_ops(coherent);
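
For readers following along, the effect of the second half of the diff,
dropping iommu_coherent_ops and arm_get_iommu_dma_map_ops in favour of the
single iommu_ops table, can be sketched the same way. Again this is plain,
self-contained userspace C with invented names (struct fake_dma_ops,
merged_ops, fake_setup_dma_ops); it models the idea rather than the
kernel's real dma_map_ops machinery.

#include <stdbool.h>
#include <stdio.h>

struct fake_device;

struct fake_dma_ops {
	void (*sync_for_device)(struct fake_device *dev);
};

struct fake_device {
	bool dma_coherent;
	const struct fake_dma_ops *ops;
};

static void merged_sync_for_device(struct fake_device *dev)
{
	if (dev->dma_coherent)
		return;		/* nothing to do for coherent devices */

	puts("performing cache maintenance");
}

/* The single table that replaces the old coherent/non-coherent pair. */
static const struct fake_dma_ops merged_ops = {
	.sync_for_device = merged_sync_for_device,
};

/* Setup no longer needs to choose between two ops tables. */
static void fake_setup_dma_ops(struct fake_device *dev, bool coherent)
{
	dev->dma_coherent = coherent;
	dev->ops = &merged_ops;	/* was: coherent ? &coherent_ops : &ops */
}

int main(void)
{
	struct fake_device dev;

	fake_setup_dma_ops(&dev, true);
	dev.ops->sync_for_device(&dev);		/* silently returns */

	fake_setup_dma_ops(&dev, false);
	dev.ops->sync_for_device(&dev);		/* prints the maintenance message */
	return 0;
}

Behaviour stays the same for both kinds of device; only the duplicate ops
table and the coherent/non-coherent selection in the setup path go away,
which is where most of the 28 deleted lines in the diffstat come from.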
--
2.28.0.dirty