Message-Id: <20210806103423.3341285-3-stevensd@google.com>
Date: Fri, 6 Aug 2021 19:34:16 +0900
From: David Stevens <stevensd@...omium.org>
To: Robin Murphy <robin.murphy@....com>
Cc: Christoph Hellwig <hch@....de>, Joerg Roedel <joro@...tes.org>,
Will Deacon <will@...nel.org>,
Sergey Senozhatsky <senozhatsky@...omium.org>,
Lu Baolu <baolu.lu@...ux.intel.com>,
iommu@...ts.linux-foundation.org, linux-kernel@...r.kernel.org,
David Stevens <stevensd@...omium.org>
Subject: [PATCH v2 2/9] dma-iommu: expose a few helper functions to module
From: David Stevens <stevensd@...omium.org>
Expose the IOVA allocation and free helpers from dma-iommu to the rest of
the IOMMU module: rename iommu_dma_alloc_iova() and iommu_dma_free_iova()
to __iommu_dma_alloc_iova() and __iommu_dma_free_iova(), drop their static
qualifiers, and declare them in include/linux/dma-iommu.h.
Signed-off-by: David Stevens <stevensd@...omium.org>
---
 drivers/iommu/dma-iommu.c | 23 ++++++++++++-----------
 include/linux/dma-iommu.h |  8 ++++++++
 2 files changed, 20 insertions(+), 11 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 1491b5450246..055ccda5eba1 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -412,7 +412,7 @@ static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
 	}
 }
 
-static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
+dma_addr_t __iommu_dma_alloc_iova(struct iommu_domain *domain,
 		size_t size, u64 dma_limit, struct device *dev)
 {
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
@@ -452,7 +452,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
 	return (dma_addr_t)iova << shift;
 }
 
-static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
+void __iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
 		dma_addr_t iova, size_t size, struct page *freelist)
 {
 	struct iova_domain *iovad = &cookie->iovad;
@@ -488,7 +488,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
 
 	if (!cookie->fq_domain)
 		iommu_iotlb_sync(domain, &iotlb_gather);
-	iommu_dma_free_iova(cookie, dma_addr, size, iotlb_gather.freelist);
+	__iommu_dma_free_iova(cookie, dma_addr, size, iotlb_gather.freelist);
 }
 
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
@@ -506,12 +506,12 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 
 	size = iova_align(iovad, size + iova_off);
 
-	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
+	iova = __iommu_dma_alloc_iova(domain, size, dma_mask, dev);
 	if (!iova)
 		return DMA_MAPPING_ERROR;
 
 	if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
-		iommu_dma_free_iova(cookie, iova, size, NULL);
+		__iommu_dma_free_iova(cookie, iova, size, NULL);
 		return DMA_MAPPING_ERROR;
 	}
 	return iova + iova_off;
@@ -617,7 +617,8 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
 		return NULL;
 
 	size = iova_align(iovad, size);
-	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
+	iova = __iommu_dma_alloc_iova(domain, size,
+				      dev->coherent_dma_mask, dev);
 	if (!iova)
 		goto out_free_pages;
 
@@ -643,7 +644,7 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
 out_free_sg:
 	sg_free_table(sgt);
 out_free_iova:
-	iommu_dma_free_iova(cookie, iova, size, NULL);
+	__iommu_dma_free_iova(cookie, iova, size, NULL);
 out_free_pages:
 	__iommu_dma_free_pages(pages, count);
 	return NULL;
@@ -923,7 +924,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		prev = s;
 	}
 
-	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
+	iova = __iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
 	if (!iova)
 		goto out_restore_sg;
 
@@ -937,7 +938,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	return __finalise_sg(dev, sg, nents, iova);
 
 out_free_iova:
-	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
+	__iommu_dma_free_iova(cookie, iova, iova_len, NULL);
 out_restore_sg:
 	__invalidate_sg(sg, nents);
 	return 0;
@@ -1226,7 +1227,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 	if (!msi_page)
 		return NULL;
 
-	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
+	iova = __iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
 	if (!iova)
 		goto out_free_page;
 
@@ -1240,7 +1241,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 	return msi_page;
 
 out_free_iova:
-	iommu_dma_free_iova(cookie, iova, size, NULL);
+	__iommu_dma_free_iova(cookie, iova, size, NULL);
 out_free_page:
 	kfree(msi_page);
 	return NULL;
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 758ca4694257..50f676678318 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -42,6 +42,14 @@ void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
 
 extern bool iommu_dma_forcedac;
 
+struct iommu_dma_cookie;
+
+dma_addr_t __iommu_dma_alloc_iova(struct iommu_domain *domain,
+		size_t size, u64 dma_limit,
+		struct device *dev);
+void __iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
+		dma_addr_t iova, size_t size, struct page *freelist);
+
 #else /* CONFIG_IOMMU_DMA */
 
 struct iommu_domain;
--
2.32.0.605.g8dce9f2422-goog
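
As a rough sketch of what this export enables, a caller elsewhere in the
IOMMU module (with CONFIG_IOMMU_DMA built in) could use the two helpers
along the following lines. The wrapper iommu_dma_reserve_region() below is
hypothetical and not part of this series; it assumes size is already
aligned to the domain's IOVA granule, and it relies only on the helpers
declared above plus existing kernel APIs (iommu_get_domain_for_dev(),
iommu_map_atomic(), dma_get_mask()).

/*
 * Illustration only: iommu_dma_reserve_region() is a made-up helper, not
 * something added by this series. It assumes @size is already aligned to
 * the IOVA granule of the device's DMA domain.
 */
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>

static dma_addr_t iommu_dma_reserve_region(struct device *dev,
					    phys_addr_t phys, size_t size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	dma_addr_t iova;

	/* Carve out an IOVA range that fits under the device's DMA mask. */
	iova = __iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map_atomic(domain, iova, phys, size,
			     IOMMU_READ | IOMMU_WRITE)) {
		/* Nothing was mapped, so there is no freelist to pass. */
		__iommu_dma_free_iova(cookie, iova, size, NULL);
		return DMA_MAPPING_ERROR;
	}

	return iova;
}

Presumably the later patches in this series consume the helpers in much the
same way when managing IOVA space for their bounce buffers.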