Message-Id: <20190421011719.14909-10-baolu.lu@linux.intel.com>
Date: Sun, 21 Apr 2019 09:17:18 +0800
From: Lu Baolu <baolu.lu@...ux.intel.com>
To: David Woodhouse <dwmw2@...radead.org>,
Joerg Roedel <joro@...tes.org>
Cc: ashok.raj@...el.com, jacob.jun.pan@...el.com, alan.cox@...el.com,
kevin.tian@...el.com, mika.westerberg@...ux.intel.com,
pengfei.xu@...el.com,
Konrad Rzeszutek Wilk <konrad.wilk@...cle.com>,
Christoph Hellwig <hch@....de>,
Marek Szyprowski <m.szyprowski@...sung.com>,
Robin Murphy <robin.murphy@....com>,
iommu@...ts.linux-foundation.org, linux-kernel@...r.kernel.org,
Lu Baolu <baolu.lu@...ux.intel.com>,
Jacob Pan <jacob.jun.pan@...ux.intel.com>
Subject: [PATCH v3 09/10] iommu/vt-d: Add dma sync ops for untrusted devices
This adds the DMA sync ops for DMA buffers used by any
untrusted device. Such buffers must be synced explicitly
because they might have been mapped with bounce pages.
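
For illustration, a minimal driver-side sketch of where these sync ops
matter. The function, device, buffer, and length here are made up for
the example; only the dma_* calls are the real core DMA API:

	#include <linux/dma-mapping.h>

	/* Hypothetical receive path for some untrusted device. */
	static int example_rx(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t dma;

		dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma))
			return -ENOMEM;

		/* ... the device DMAs into the (possibly bounced) buffer ... */

		/*
		 * Hand ownership to the CPU. With this patch applied, the
		 * call reaches intel_sync_single_for_cpu(), which copies
		 * the data from the bounce page back into "buf" for an
		 * untrusted device; without it, the CPU would read stale
		 * data from the original pages.
		 */
		dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);

		/* ... CPU inspects buf, then hands it back to the device ... */
		dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);

		dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
		return 0;
	}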
Cc: Ashok Raj <ashok.raj@...el.com>
Cc: Jacob Pan <jacob.jun.pan@...ux.intel.com>
Cc: Kevin Tian <kevin.tian@...el.com>
Signed-off-by: Lu Baolu <baolu.lu@...ux.intel.com>
Tested-by: Xu Pengfei <pengfei.xu@...el.com>
Tested-by: Mika Westerberg <mika.westerberg@...el.com>
---
 drivers/iommu/Kconfig       |  1 +
 drivers/iommu/intel-iommu.c | 96 +++++++++++++++++++++++++++++++++----
 2 files changed, 88 insertions(+), 9 deletions(-)
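
The iommu_bounce_sync_single() helper called below is introduced by an
earlier patch in this series and is not shown here. As a rough mental
model only, every name in this sketch hypothetical rather than the
series' actual code, the copy direction it has to implement looks like:

	#include <linux/dma-direction.h>
	#include <linux/string.h>

	enum bounce_sync_target { SYNC_FOR_CPU, SYNC_FOR_DEVICE };

	/* Hypothetical sketch: sync data between the original buffer
	 * and the bounce page that was actually mapped for the device. */
	static void bounce_page_sync(void *orig, void *bounce, size_t size,
				     enum dma_data_direction dir,
				     enum bounce_sync_target target)
	{
		if (target == SYNC_FOR_CPU && dir != DMA_TO_DEVICE)
			memcpy(orig, bounce, size);	/* device wrote; copy back for CPU */
		else if (target == SYNC_FOR_DEVICE && dir != DMA_FROM_DEVICE)
			memcpy(bounce, orig, size);	/* CPU wrote; copy out for device */
	}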
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index b918c22ca25b..f3191ec29e45 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -194,6 +194,7 @@ config INTEL_IOMMU
 	select IOMMU_IOVA
 	select NEED_DMA_MAP_STATE
 	select DMAR_TABLE
+	select IOMMU_BOUNCE_PAGE
 	help
 	  DMA remapping (DMAR) devices support enables independent address
 	  translations for Direct Memory Access (DMA) from devices.
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 0d80f26b8a72..ed941ec9b9d5 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3683,16 +3683,94 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
 	return nelems;
 }
 
+static void
+intel_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+			  size_t size, enum dma_data_direction dir)
+{
+	if (WARN_ON(dir == DMA_NONE))
+		return;
+
+	if (!device_needs_bounce(dev))
+		return;
+
+	if (iommu_no_mapping(dev))
+		return;
+
+	iommu_bounce_sync_single(dev, addr, size, dir, SYNC_FOR_CPU);
+}
+
+static void
+intel_sync_single_for_device(struct device *dev, dma_addr_t addr,
+			     size_t size, enum dma_data_direction dir)
+{
+	if (WARN_ON(dir == DMA_NONE))
+		return;
+
+	if (!device_needs_bounce(dev))
+		return;
+
+	if (iommu_no_mapping(dev))
+		return;
+
+	iommu_bounce_sync_single(dev, addr, size, dir, SYNC_FOR_DEVICE);
+}
+
+static void
+intel_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
+		      int nelems, enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+
+	if (WARN_ON(dir == DMA_NONE))
+		return;
+
+	if (!device_needs_bounce(dev))
+		return;
+
+	if (iommu_no_mapping(dev))
+		return;
+
+	for_each_sg(sglist, sg, nelems, i)
+		iommu_bounce_sync_single(dev, sg_dma_address(sg),
+					 sg_dma_len(sg), dir, SYNC_FOR_CPU);
+}
+
+static void
+intel_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
+			 int nelems, enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+
+	if (WARN_ON(dir == DMA_NONE))
+		return;
+
+	if (!device_needs_bounce(dev))
+		return;
+
+	if (iommu_no_mapping(dev))
+		return;
+
+	for_each_sg(sglist, sg, nelems, i)
+		iommu_bounce_sync_single(dev, sg_dma_address(sg),
+					 sg_dma_len(sg), dir, SYNC_FOR_DEVICE);
+}
+
 static const struct dma_map_ops intel_dma_ops = {
-	.alloc = intel_alloc_coherent,
-	.free = intel_free_coherent,
-	.map_sg = intel_map_sg,
-	.unmap_sg = intel_unmap_sg,
-	.map_page = intel_map_page,
-	.unmap_page = intel_unmap_page,
-	.map_resource = intel_map_resource,
-	.unmap_resource = intel_unmap_page,
-	.dma_supported = dma_direct_supported,
+	.alloc			= intel_alloc_coherent,
+	.free			= intel_free_coherent,
+	.map_sg			= intel_map_sg,
+	.unmap_sg		= intel_unmap_sg,
+	.map_page		= intel_map_page,
+	.unmap_page		= intel_unmap_page,
+	.sync_single_for_cpu	= intel_sync_single_for_cpu,
+	.sync_single_for_device	= intel_sync_single_for_device,
+	.sync_sg_for_cpu	= intel_sync_sg_for_cpu,
+	.sync_sg_for_device	= intel_sync_sg_for_device,
+	.map_resource		= intel_map_resource,
+	.unmap_resource		= intel_unmap_page,
+	.dma_supported		= dma_direct_supported,
 };
 
 static inline int iommu_domain_cache_init(void)
--
2.17.1
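
For context on how the new ops table entries are reached: in kernels of
this era, the core DMA API dispatches sync calls through the device's
dma_map_ops. The following is paraphrased from memory and simplified,
so treat it as a sketch of include/linux/dma-mapping.h rather than the
exact source:

	static inline void
	dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
				size_t size, enum dma_data_direction dir)
	{
		const struct dma_map_ops *ops = get_dma_ops(dev);

		BUG_ON(!valid_dma_direction(dir));
		if (dma_is_direct(ops))
			dma_direct_sync_single_for_cpu(dev, addr, size, dir);
		else if (ops->sync_single_for_cpu)
			/* With intel_dma_ops installed, this is the
			 * intel_sync_single_for_cpu() added above. */
			ops->sync_single_for_cpu(dev, addr, size, dir);
		debug_dma_sync_single_for_cpu(dev, addr, size, dir);
	}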