Date:   Sat, 28 Aug 2021 17:36:35 +0200
From:   Sven Peter <sven@...npeter.dev>
To:     iommu@...ts.linux-foundation.org
Cc:     Sven Peter <sven@...npeter.dev>, Joerg Roedel <joro@...tes.org>,
        Will Deacon <will@...nel.org>,
        Robin Murphy <robin.murphy@....com>,
        Arnd Bergmann <arnd@...nel.org>,
        Mohamed Mediouni <mohamed.mediouni@...amail.com>,
        Alexander Graf <graf@...zon.com>,
        Hector Martin <marcan@...can.st>,
        Alyssa Rosenzweig <alyssa@...enzweig.io>,
        linux-kernel@...r.kernel.org
Subject: [PATCH v2 1/8] iommu/dma: Align size for untrusted devs to IOVA granule

Up until now PAGE_SIZE was always a multiple of iovad->granule,
so every page-aligned buffer was granule-aligned as well and
allocations done as part of the coherent DMA API never exposed
adjacent pages to untrusted devices. With PAGE_SIZE smaller than
iovad->granule, however, a single IOMMU page spans multiple CPU
pages and all these allocations must be explicitly aligned to
iovad->granule.
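
For reference, iova_align() simply rounds the size up to the IOVA
granule. A sketch of the helper as found in include/linux/iova.h,
shown for illustration only (this patch does not change it):

	/* Round size up to the next multiple of the IOVA granule. */
	static inline size_t iova_align(struct iova_domain *iovad, size_t size)
	{
		return ALIGN(size, iovad->granule);
	}

With e.g. PAGE_SIZE == 4KiB and a 16KiB granule, a 4KiB request is
thus padded to 16KiB and no unrelated allocation can end up in the
same IOMMU page.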

Signed-off-by: Sven Peter <sven@...npeter.dev>
---
 drivers/iommu/dma-iommu.c | 39 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 38 insertions(+), 1 deletion(-)
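
Note (illustration only, not part of the diff below): at this point
dev_is_untrusted() in dma-iommu.c is simply

	static bool dev_is_untrusted(struct device *dev)
	{
		return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
	}

so the extra padding is only applied to devices, such as external
Thunderbolt devices, that must not be able to see adjacent memory.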

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index d0bc8c06e1a4..e8eae34e9e4f 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -735,10 +735,16 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
 		unsigned long attrs)
 {
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
 	struct page **pages;
 	struct sg_table sgt;
 	void *vaddr;
 
+	if (dev_is_untrusted(dev))
+		size = iova_align(iovad, size);
+
 	pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, prot,
 						attrs);
 	if (!pages)
@@ -762,12 +768,18 @@ static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
 		size_t size, enum dma_data_direction dir, gfp_t gfp,
 		unsigned long attrs)
 {
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
 	struct dma_sgt_handle *sh;
 
 	sh = kmalloc(sizeof(*sh), gfp);
 	if (!sh)
 		return NULL;
 
+	if (dev_is_untrusted(dev))
+		size = iova_align(iovad, size);
+
 	sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp,
 						    PAGE_KERNEL, attrs);
 	if (!sh->pages) {
@@ -780,8 +792,14 @@ static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
 static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
 		struct sg_table *sgt, enum dma_data_direction dir)
 {
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
 	struct dma_sgt_handle *sh = sgt_handle(sgt);
 
+	if (dev_is_untrusted(dev))
+		size = iova_align(iovad, size);
+
 	__iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
 	__iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
 	sg_free_table(&sh->sgt);
@@ -1127,10 +1145,17 @@ static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
 
 static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
 {
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
 	size_t alloc_size = PAGE_ALIGN(size);
-	int count = alloc_size >> PAGE_SHIFT;
+	int count;
 	struct page *page = NULL, **pages = NULL;
 
+	if (dev_is_untrusted(dev))
+		alloc_size = iova_align(iovad, alloc_size);
+	count = alloc_size >> PAGE_SHIFT;
+
 	/* Non-coherent atomic allocation? Easy */
 	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
 	    dma_free_from_pool(dev, cpu_addr, alloc_size))
@@ -1166,12 +1191,18 @@ static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
 static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
 		struct page **pagep, gfp_t gfp, unsigned long attrs)
 {
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
 	bool coherent = dev_is_dma_coherent(dev);
 	size_t alloc_size = PAGE_ALIGN(size);
 	int node = dev_to_node(dev);
 	struct page *page = NULL;
 	void *cpu_addr;
 
+	if (dev_is_untrusted(dev))
+		alloc_size = iova_align(iovad, alloc_size);
+
 	page = dma_alloc_contiguous(dev, alloc_size, gfp);
 	if (!page)
 		page = alloc_pages_node(node, gfp, get_order(alloc_size));
@@ -1203,6 +1234,9 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
 static void *iommu_dma_alloc(struct device *dev, size_t size,
 		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
 {
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
 	bool coherent = dev_is_dma_coherent(dev);
 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
 	struct page *page = NULL;
@@ -1216,6 +1250,9 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 				dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
 	}
 
+	if (dev_is_untrusted(dev))
+		size = iova_align(iovad, size);
+
 	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
 	    !gfpflags_allow_blocking(gfp) && !coherent)
 		page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
-- 
2.25.1
