Message-Id: <20210128145837.2250561-5-hch@lst.de>
Date: Thu, 28 Jan 2021 15:58:35 +0100
From: Christoph Hellwig <hch@....de>
To: Mauro Carvalho Chehab <mchehab@...nel.org>,
Marek Szyprowski <m.szyprowski@...sung.com>,
Tomasz Figa <tfiga@...omium.org>,
Ricardo Ribalda <ribalda@...omium.org>,
Sergey Senozhatsky <senozhatsky@...gle.com>,
iommu@...ts.linux-foundation.org
Cc: Robin Murphy <robin.murphy@....com>, linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-media@...r.kernel.org
Subject: [PATCH 4/6] dma-iommu: refactor iommu_dma_alloc_remap
Split out a new helper that only allocates a sg_table worth of
memory without mapping it into contiguous kernel address space.

Signed-off-by: Christoph Hellwig <hch@....de>
---
 drivers/iommu/dma-iommu.c | 66 +++++++++++++++++++++------------------
 1 file changed, 35 insertions(+), 31 deletions(-)
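
[Note for reviewers, not part of the commit message: the contract of
the new helper as this patch defines it, in sketch form. The names are
taken from the patch itself; the comment is an editorial summary of
the behaviour visible in the diff, not kernel-doc from the tree.]

	/*
	 * __iommu_dma_alloc_noncontiguous - allocate pages and map them
	 * contiguously into IOVA space, without creating a kernel virtual
	 * mapping.
	 *
	 * On success the pages array is returned, @sgt is filled in (the
	 * caller owns it and must release it with sg_free_table()), and
	 * *@dma_handle holds the IOVA of the mapping.  On failure NULL is
	 * returned and *@dma_handle is DMA_MAPPING_ERROR.
	 */
	static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
			size_t size, struct sg_table *sgt, dma_addr_t *dma_handle,
			gfp_t gfp, pgprot_t prot, unsigned long attrs);
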
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 255533faf90599..65af875ba8495c 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -661,24 +661,13 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
 	return pages;
 }
 
-/**
- * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
- * @dev: Device to allocate memory for. Must be a real device
- *	 attached to an iommu_dma_domain
- * @size: Size of buffer in bytes
- * @dma_handle: Out argument for allocated DMA handle
- * @gfp: Allocation flags
- * @prot: pgprot_t to use for the remapped mapping
- * @attrs: DMA attributes for this allocation
- *
- * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
+/*
+ * If size is less than PAGE_SIZE, then a full CPU page will be allocated,
  * but an IOMMU which supports smaller pages might not map the whole thing.
- *
- * Return: Mapped virtual address, or NULL on failure.
  */
-static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
-		unsigned long attrs)
+static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
+		size_t size, struct sg_table *sgt, dma_addr_t *dma_handle,
+		gfp_t gfp, pgprot_t prot, unsigned long attrs)
 {
 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
@@ -687,9 +676,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
 	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
 	struct page **pages;
-	struct sg_table sgt;
 	dma_addr_t iova;
-	void *vaddr;
 
 	*dma_handle = DMA_MAPPING_ERROR;
 
@@ -717,34 +704,26 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 	if (!iova)
 		goto out_free_pages;
 
-	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
+	if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, GFP_KERNEL))
 		goto out_free_iova;
 
 	if (!(ioprot & IOMMU_CACHE)) {
 		struct scatterlist *sg;
 		int i;
 
-		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
+		for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
 			arch_dma_prep_coherent(sg_page(sg), sg->length);
 	}
 
-	if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
+	if (iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot)
 			< size)
 		goto out_free_sg;
 
-	vaddr = dma_common_pages_remap(pages, size, prot,
-			__builtin_return_address(0));
-	if (!vaddr)
-		goto out_unmap;
-
 	*dma_handle = iova;
-	sg_free_table(&sgt);
-	return vaddr;
+	return pages;
 
-out_unmap:
-	__iommu_dma_unmap(dev, iova, size);
 out_free_sg:
-	sg_free_table(&sgt);
+	sg_free_table(sgt);
 out_free_iova:
 	iommu_dma_free_iova(cookie, iova, size, NULL);
 out_free_pages:
@@ -752,6 +731,31 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 	return NULL;
 }
 
+static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
+		unsigned long attrs)
+{
+	struct page **pages;
+	struct sg_table sgt;
+	void *vaddr;
+
+	pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, dma_handle,
+			gfp, prot, attrs);
+	if (!pages)
+		return NULL;
+	sg_free_table(&sgt);
+	vaddr = dma_common_pages_remap(pages, size, prot,
+			__builtin_return_address(0));
+	if (!vaddr)
+		goto out_unmap;
+	return vaddr;
+
+out_unmap:
+	__iommu_dma_unmap(dev, *dma_handle, size);
+	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
+	return NULL;
+}
+
 static void iommu_dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
 {
--
2.29.2
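
[Usage note: the point of the split is that a caller can now consume
the buffer through the filled-in sg_table instead of through a
contiguous kernel mapping.  A minimal sketch of such a caller follows;
it is hypothetical and not part of this patch, and the surrounding
error handling and the lifetime of "sgt" are assumptions made purely
for illustration:]

	struct sg_table sgt;
	dma_addr_t dma_handle;
	struct page **pages;

	pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, &dma_handle,
			GFP_KERNEL, PAGE_KERNEL, 0);
	if (!pages)
		return -ENOMEM;
	/*
	 * The device now sees a single contiguous IOVA range starting at
	 * dma_handle, while the CPU side remains individual pages described
	 * by sgt -- no vmap()/dma_common_pages_remap() involved.
	 */

The remapping path keeps its old behaviour on top of the helper:
iommu_dma_alloc_remap() drops the sg_table right after the IOMMU
mapping is built, adds the contiguous kernel mapping with
dma_common_pages_remap(), and unwinds through __iommu_dma_unmap() and
__iommu_dma_free_pages() if that remap fails, as the new out_unmap
label in the diff shows.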