[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20190213182920.16764-13-hch@lst.de>
Date: Wed, 13 Feb 2019 19:29:11 +0100
From: Christoph Hellwig <hch@lst.de>
To: Robin Murphy <robin.murphy@....com>
Cc: Joerg Roedel <joro@...tes.org>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will.deacon@....com>,
Tom Lendacky <thomas.lendacky@....com>,
iommu@...ts.linux-foundation.org,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org
Subject: [PATCH 12/21] dma-iommu: factor atomic pool allocations into helpers
This keeps the code together and will simplify compiling the code
out on architectures that are always dma coherent.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
drivers/iommu/dma-iommu.c | 51 +++++++++++++++++++++++++++++----------
1 file changed, 38 insertions(+), 13 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index c1ecb0c3436e..ff1ada9f98f5 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -674,6 +674,35 @@ static int iommu_dma_get_sgtable_remap(struct sg_table *sgt, void *cpu_addr,
GFP_KERNEL);
}
+static void iommu_dma_free_pool(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ __iommu_dma_unmap(iommu_get_domain_for_dev(dev), dma_handle, size);
+ dma_free_from_pool(vaddr, PAGE_ALIGN(size));
+}
+
+static void *iommu_dma_alloc_pool(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+{
+ bool coherent = dev_is_dma_coherent(dev);
+ struct page *page;
+ void *vaddr;
+
+ vaddr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
+ if (!vaddr)
+ return NULL;
+
+ *dma_handle = __iommu_dma_map(dev, page_to_phys(page), size,
+ dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs),
+ iommu_get_domain_for_dev(dev));
+ if (*dma_handle == DMA_MAPPING_ERROR) {
+ dma_free_from_pool(vaddr, PAGE_ALIGN(size));
+ return NULL;
+ }
+
+ return vaddr;
+}
+
static void iommu_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
@@ -982,21 +1011,18 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
* get the virtually contiguous buffer we need by way of a
* physically contiguous allocation.
*/
- if (coherent) {
- page = alloc_pages(gfp, get_order(size));
- addr = page ? page_address(page) : NULL;
- } else {
- addr = dma_alloc_from_pool(size, &page, gfp);
- }
- if (!addr)
+ if (!coherent)
+ return iommu_dma_alloc_pool(dev, iosize, handle, gfp,
+ attrs);
+
+ page = alloc_pages(gfp, get_order(size));
+ if (!page)
return NULL;
+ addr = page_address(page);
*handle = __iommu_dma_map_page(dev, page, 0, iosize, ioprot);
if (*handle == DMA_MAPPING_ERROR) {
- if (coherent)
- __free_pages(page, get_order(size));
- else
- dma_free_from_pool(addr, size);
+ __free_pages(page, get_order(size));
addr = NULL;
}
} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
@@ -1050,8 +1076,7 @@ static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
* Hence how dodgy the below logic looks...
*/
if (dma_in_atomic_pool(cpu_addr, size)) {
- __iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
- dma_free_from_pool(cpu_addr, size);
+ iommu_dma_free_pool(dev, size, cpu_addr, handle);
} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
struct page *page = vmalloc_to_page(cpu_addr);
--
2.20.1
Powered by blists - more mailing lists