Message-Id: <20191221150402.13868-3-murphyt7@tcd.ie>
Date: Sat, 21 Dec 2019 15:03:54 +0000
From: Tom Murphy <murphyt7@....ie>
To: iommu@...ts.linux-foundation.org
Cc: Tom Murphy <murphyt7@....ie>,
Jani Nikula <jani.nikula@...ux.intel.com>,
Joonas Lahtinen <joonas.lahtinen@...ux.intel.com>,
Rodrigo Vivi <rodrigo.vivi@...el.com>,
David Airlie <airlied@...ux.ie>,
Daniel Vetter <daniel@...ll.ch>,
Joerg Roedel <joro@...tes.org>, Will Deacon <will@...nel.org>,
Robin Murphy <robin.murphy@....com>,
Marek Szyprowski <m.szyprowski@...sung.com>,
Kukjin Kim <kgene@...nel.org>,
Krzysztof Kozlowski <krzk@...nel.org>,
David Woodhouse <dwmw2@...radead.org>,
Lu Baolu <baolu.lu@...ux.intel.com>,
Andy Gross <agross@...nel.org>,
Bjorn Andersson <bjorn.andersson@...aro.org>,
Matthias Brugger <matthias.bgg@...il.com>,
Rob Clark <robdclark@...il.com>,
Heiko Stuebner <heiko@...ech.de>,
Gerald Schaefer <gerald.schaefer@...ibm.com>,
Thierry Reding <thierry.reding@...il.com>,
Jonathan Hunter <jonathanh@...dia.com>,
Jean-Philippe Brucker <jean-philippe@...aro.org>,
Alex Williamson <alex.williamson@...hat.com>,
Cornelia Huck <cohuck@...hat.com>,
Eric Auger <eric.auger@...hat.com>,
Julien Grall <julien.grall@....com>,
Marc Zyngier <maz@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
intel-gfx@...ts.freedesktop.org, dri-devel@...ts.freedesktop.org,
linux-kernel@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
linux-samsung-soc@...r.kernel.org, linux-arm-msm@...r.kernel.org,
linux-mediatek@...ts.infradead.org,
linux-rockchip@...ts.infradead.org, linux-s390@...r.kernel.org,
linux-tegra@...r.kernel.org,
virtualization@...ts.linux-foundation.org, kvm@...r.kernel.org
Subject: [PATCH 2/8] iommu/vt-d: Use default dma_direct_* mapping functions for direct mapped devices
We should only assign intel_dma_ops to devices which will actually use
the IOMMU, and let the default dma_direct_* fallback functions handle
all other devices. This won't change any behaviour; it just uses the
generic implementations for direct-mapped devices rather than the
Intel-specific ones.
Signed-off-by: Tom Murphy <murphyt7@....ie>
---
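For context: with the global dma_ops assignment removed, the common DMA
code dispatches per device, and a NULL dev->dma_ops means the dma_direct_*
path is taken. A simplified sketch of that dispatch, paraphrased from
include/linux/dma-mapping.h of this era (not the verbatim source; details
vary by kernel version):

	/*
	 * Simplified paraphrase of the generic per-device dispatch
	 * (circa v5.5). With dev->dma_ops unset, dma_is_direct() is
	 * true and the generic direct-mapping code runs.
	 */
	static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
	{
		if (dev->dma_ops)
			return dev->dma_ops;
		return get_arch_dma_ops(dev->bus);
	}

	static inline dma_addr_t dma_map_page_attrs(struct device *dev,
			struct page *page, size_t offset, size_t size,
			enum dma_data_direction dir, unsigned long attrs)
	{
		const struct dma_map_ops *ops = get_dma_ops(dev);

		if (dma_is_direct(ops))
			/* No per-device ops: generic direct mapping. */
			return dma_direct_map_page(dev, page, offset,
						   size, dir, attrs);
		/* Per-device ops (e.g. intel_dma_ops) take precedence. */
		return ops->map_page(dev, page, offset, size, dir, attrs);
	}

So setting dev->dma_ops = &intel_dma_ops only for devices that get a DMA
domain, and leaving it NULL for identity-mapped devices, removes the need
for the per-call iommu_no_mapping() checks deleted below.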
drivers/iommu/intel-iommu.c | 52 +++++--------------------------------
1 file changed, 6 insertions(+), 46 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index c1ea66467918..64b1a9793daa 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2794,17 +2794,6 @@ static int __init si_domain_init(int hw)
return 0;
}
-static int identity_mapping(struct device *dev)
-{
- struct device_domain_info *info;
-
- info = dev->archdata.iommu;
- if (info && info != DUMMY_DEVICE_DOMAIN_INFO && info != DEFER_DEVICE_DOMAIN_INFO)
- return (info->domain == si_domain);
-
- return 0;
-}
-
static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
{
struct dmar_domain *ndomain;
@@ -3461,12 +3450,6 @@ static struct dmar_domain *get_private_domain_for_dev(struct device *dev)
return domain;
}
-/* Check if the dev needs to go through non-identity map and unmap process.*/
-static bool iommu_no_mapping(struct device *dev)
-{
- return iommu_dummy(dev) || identity_mapping(dev);
-}
-
static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
size_t size, int dir, u64 dma_mask)
{
@@ -3531,9 +3514,6 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
enum dma_data_direction dir,
unsigned long attrs)
{
- if (iommu_no_mapping(dev))
- return dma_direct_map_page(dev, page, offset, size, dir, attrs);
-
return __intel_map_single(dev, page_to_phys(page) + offset, size, dir,
*dev->dma_mask);
}
@@ -3542,10 +3522,6 @@ static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- if (iommu_no_mapping(dev))
- return dma_direct_map_resource(dev, phys_addr, size, dir,
- attrs);
-
return __intel_map_single(dev, phys_addr, size, dir, *dev->dma_mask);
}
@@ -3597,17 +3573,13 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- if (iommu_no_mapping(dev))
- dma_direct_unmap_page(dev, dev_addr, size, dir, attrs);
- else
- intel_unmap(dev, dev_addr, size);
+ intel_unmap(dev, dev_addr, size);
}
static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- if (!iommu_no_mapping(dev))
- intel_unmap(dev, dev_addr, size);
+ intel_unmap(dev, dev_addr, size);
}
static void *intel_alloc_coherent(struct device *dev, size_t size,
@@ -3617,9 +3589,6 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
struct page *page = NULL;
int order;
- if (iommu_no_mapping(dev))
- return dma_direct_alloc(dev, size, dma_handle, flags, attrs);
-
size = PAGE_ALIGN(size);
order = get_order(size);
@@ -3653,9 +3622,6 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
int order;
struct page *page = virt_to_page(vaddr);
- if (iommu_no_mapping(dev))
- return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
-
size = PAGE_ALIGN(size);
order = get_order(size);
@@ -3673,9 +3639,6 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
struct scatterlist *sg;
int i;
- if (iommu_no_mapping(dev))
- return dma_direct_unmap_sg(dev, sglist, nelems, dir, attrs);
-
for_each_sg(sglist, sg, nelems, i) {
nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
}
@@ -3699,8 +3662,6 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
struct intel_iommu *iommu;
BUG_ON(dir == DMA_NONE);
- if (iommu_no_mapping(dev))
- return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
domain = deferred_attach_domain(dev);
if (!domain)
@@ -3747,8 +3708,6 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
static u64 intel_get_required_mask(struct device *dev)
{
- if (iommu_no_mapping(dev))
- return dma_direct_get_required_mask(dev);
return DMA_BIT_MASK(32);
}
@@ -5014,7 +4973,6 @@ int __init intel_iommu_init(void)
if (!has_untrusted_dev() || intel_no_bounce)
swiotlb = 0;
#endif
- dma_ops = &intel_dma_ops;
init_iommu_pm_ops();
@@ -5623,6 +5581,8 @@ static int intel_iommu_add_device(struct device *dev)
dev_info(dev,
"Device uses a private identity domain.\n");
}
+ } else {
+ dev->dma_ops = &intel_dma_ops;
}
} else {
if (device_def_domain_type(dev) == IOMMU_DOMAIN_DMA) {
@@ -5639,6 +5599,7 @@ static int intel_iommu_add_device(struct device *dev)
dev_info(dev,
"Device uses a private dma domain.\n");
}
+ dev->dma_ops = &intel_dma_ops;
}
}
@@ -5665,8 +5626,7 @@ static void intel_iommu_remove_device(struct device *dev)
iommu_device_unlink(&iommu->iommu, dev);
- if (device_needs_bounce(dev))
- set_dma_ops(dev, NULL);
+ set_dma_ops(dev, NULL);
}
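The unconditional set_dma_ops(dev, NULL) above is safe for the same
reason: clearing the per-device pointer restores the dma_direct_*
fallback for every removed device, whether or not it previously used
intel_dma_ops or bounce buffering. For reference, the helper is just a
pointer assignment (simplified from include/linux/dma-mapping.h, circa
v5.5):

	static inline void set_dma_ops(struct device *dev,
				       const struct dma_map_ops *dma_ops)
	{
		dev->dma_ops = dma_ops;
	}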
static void intel_iommu_get_resv_regions(struct device *device,
--
2.20.1