Message-Id: <20200925141218.13550-3-m.szyprowski@samsung.com>
Date:   Fri, 25 Sep 2020 16:12:12 +0200
From:   Marek Szyprowski <m.szyprowski@...sung.com>
To:     Linux IOMMU <iommu@...ts.linux-foundation.org>,
        linux-media@...r.kernel.org, linux-samsung-soc@...r.kernel.org
Cc:     Marek Szyprowski <m.szyprowski@...sung.com>,
        Robin Murphy <robin.murphy@....com>,
        Joerg Roedel <joro@...tes.org>, Christoph Hellwig <hch@....de>,
        Sylwester Nawrocki <snawrocki@...nel.org>,
        Krzysztof Kozlowski <krzk@...nel.org>,
        Bartlomiej Zolnierkiewicz <b.zolnierkie@...sung.com>,
        Tomasz Figa <tfiga@...omium.org>, linux-kernel@...r.kernel.org
Subject: [PATCH 2/8] iommu: iova: properly handle 0 as a valid IOVA address

Zero is a valid DMA and IOVA address on many architectures, so adjust the
IOVA management code to handle it properly. A new value, IOVA_BAD_ADDR
(~0UL), is introduced as a generic marker for the error case. Adjust all
callers of alloc_iova_fast() to check for the new return value.
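
As a standalone illustration (a toy allocator, not code from this patch;
every name except IOVA_BAD_ADDR is made up), the snippet below shows why
the old "if (!pfn)" failure test breaks once pfn 0 becomes a legal
allocation result, while the ~0UL sentinel stays unambiguous: no
allocated range can start at the very last representable pfn.

	#include <stdio.h>

	#define IOVA_BAD_ADDR	(~0UL)

	/*
	 * Toy bump allocator handing out page frame numbers starting
	 * at 0, so a *successful* allocation may legitimately return 0.
	 */
	static unsigned long toy_alloc_iova(unsigned long *next,
					    unsigned long size,
					    unsigned long limit_pfn)
	{
		unsigned long pfn = *next;

		if (pfn + size > limit_pfn)
			return IOVA_BAD_ADDR;	/* 0 would be ambiguous */
		*next += size;
		return pfn;
	}

	int main(void)
	{
		unsigned long next = 0;
		unsigned long pfn = toy_alloc_iova(&next, 16, 1UL << 20);

		/* Only the sentinel test is safe here; "if (!pfn)"
		 * would wrongly treat this valid allocation at pfn 0
		 * as a failure. */
		if (pfn == IOVA_BAD_ADDR)
			printf("allocation failed\n");
		else
			printf("allocated iova at pfn %lu\n", pfn); /* 0 */
		return 0;
	}

The same reasoning drives the iova == IOVA_BAD_ADDR and
iova_pfn == IOVA_BAD_ADDR checks throughout the diff below.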

Signed-off-by: Marek Szyprowski <m.szyprowski@...sung.com>
---
 drivers/iommu/dma-iommu.c   | 18 ++++++++++--------
 drivers/iommu/intel/iommu.c | 12 ++++++------
 drivers/iommu/iova.c        | 10 ++++++----
 include/linux/iova.h        |  2 ++
 4 files changed, 24 insertions(+), 18 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index cd6e3c70ebb3..91dd8f46dae1 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -405,7 +405,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
 {
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iova_domain *iovad = &cookie->iovad;
-	unsigned long shift, iova_len, iova = 0;
+	unsigned long shift, iova_len, iova = IOVA_BAD_ADDR;
 
 	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
 		cookie->msi_iova += size;
@@ -433,11 +433,13 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
 		iova = alloc_iova_fast(iovad, iova_len,
 				       DMA_BIT_MASK(32) >> shift, false);
 
-	if (!iova)
+	if (iova == IOVA_BAD_ADDR)
 		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
 				       true);
 
-	return (dma_addr_t)iova << shift;
+	if (iova != IOVA_BAD_ADDR)
+		return (dma_addr_t)iova << shift;
+	return DMA_MAPPING_ERROR;
 }
 
 static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
@@ -493,8 +495,8 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 	size = iova_align(iovad, size + iova_off);
 
 	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
-	if (!iova)
-		return DMA_MAPPING_ERROR;
+	if (iova == DMA_MAPPING_ERROR)
+		return iova;
 
 	if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
 		iommu_dma_free_iova(cookie, iova, size);
@@ -617,7 +619,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 
 	size = iova_align(iovad, size);
 	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
-	if (!iova)
+	if (iova == DMA_MAPPING_ERROR)
 		goto out_free_pages;
 
 	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
@@ -887,7 +889,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	}
 
 	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
-	if (!iova)
+	if (iova == DMA_MAPPING_ERROR)
 		goto out_restore_sg;
 
 	/*
@@ -1181,7 +1183,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 		return NULL;
 
 	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
-	if (!iova)
+	if (iova == DMA_MAPPING_ERROR)
 		goto out_free_page;
 
 	if (iommu_map(domain, iova, msi_addr, size, prot))
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 00963cedfd83..885d0dee39cc 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -3416,15 +3416,15 @@ static unsigned long intel_alloc_iova(struct device *dev,
 		 */
 		iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
 					   IOVA_PFN(DMA_BIT_MASK(32)), false);
-		if (iova_pfn)
+		if (iova_pfn != IOVA_BAD_ADDR)
 			return iova_pfn;
 	}
 	iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
 				   IOVA_PFN(dma_mask), true);
-	if (unlikely(!iova_pfn)) {
+	if (unlikely(iova_pfn == IOVA_BAD_ADDR)) {
 		dev_err_once(dev, "Allocating %ld-page iova failed\n",
 			     nrpages);
-		return 0;
+		return IOVA_BAD_ADDR;
 	}
 
 	return iova_pfn;
@@ -3454,7 +3454,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 	size = aligned_nrpages(paddr, size);
 
 	iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
-	if (!iova_pfn)
+	if (iova_pfn == IOVA_BAD_ADDR)
 		goto error;
 
 	/*
@@ -3663,7 +3663,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
 
 	iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
 				*dev->dma_mask);
-	if (!iova_pfn) {
+	if (iova_pfn == IOVA_BAD_ADDR) {
 		sglist->dma_length = 0;
 		return 0;
 	}
@@ -3760,7 +3760,7 @@ bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
 	nrpages = aligned_nrpages(0, size);
 	iova_pfn = intel_alloc_iova(dev, domain,
 				    dma_to_mm_pfn(nrpages), dma_mask);
-	if (!iova_pfn)
+	if (iova_pfn == IOVA_BAD_ADDR)
 		return DMA_MAPPING_ERROR;
 
 	/*
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 30d969a4c5fd..87555ed1737a 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -407,6 +407,8 @@ EXPORT_SYMBOL_GPL(free_iova);
  * This function tries to satisfy an iova allocation from the rcache,
  * and falls back to regular allocation on failure. If regular allocation
  * fails too and the flush_rcache flag is set then the rcache will be flushed.
+ * Returns the pfn at which the allocated iova starts, or IOVA_BAD_ADDR
+ * on failure.
 */
 unsigned long
 alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
@@ -416,7 +418,7 @@ alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
 	struct iova *new_iova;
 
 	iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
-	if (iova_pfn)
+	if (iova_pfn != IOVA_BAD_ADDR)
 		return iova_pfn;
 
 retry:
@@ -425,7 +427,7 @@ alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
 		unsigned int cpu;
 
 		if (!flush_rcache)
-			return 0;
+			return IOVA_BAD_ADDR;
 
 		/* Try replenishing IOVAs by flushing rcache. */
 		flush_rcache = false;
@@ -956,7 +958,7 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
 				       unsigned long limit_pfn)
 {
 	struct iova_cpu_rcache *cpu_rcache;
-	unsigned long iova_pfn = 0;
+	unsigned long iova_pfn = IOVA_BAD_ADDR;
 	bool has_pfn = false;
 	unsigned long flags;
 
@@ -998,7 +1000,7 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
 	unsigned int log_size = order_base_2(size);
 
 	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
-		return 0;
+		return IOVA_BAD_ADDR;
 
 	return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
 }
diff --git a/include/linux/iova.h b/include/linux/iova.h
index a0637abffee8..69737e6bcef6 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -22,6 +22,8 @@ struct iova {
 	unsigned long	pfn_lo; /* Lowest allocated pfn */
 };
 
+#define IOVA_BAD_ADDR	(~0UL)
+
 struct iova_magazine;
 struct iova_cpu_rcache;
 
-- 
2.17.1
