Message-Id: <20200817234033.442511-3-leobras.c@gmail.com>
Date: Mon, 17 Aug 2020 20:40:25 -0300
From: Leonardo Bras <leobras.c@...il.com>
To: Michael Ellerman <mpe@...erman.id.au>,
Benjamin Herrenschmidt <benh@...nel.crashing.org>,
Paul Mackerras <paulus@...ba.org>,
Alexey Kardashevskiy <aik@...abs.ru>,
Christophe Leroy <christophe.leroy@....fr>,
Leonardo Bras <leobras.c@...il.com>,
Joel Stanley <joel@....id.au>,
Thiago Jung Bauermann <bauerman@...ux.ibm.com>,
Ram Pai <linuxram@...ibm.com>,
Brian King <brking@...ux.vnet.ibm.com>,
Murilo Fossa Vicentini <muvic@...ux.ibm.com>,
David Dai <zdai@...ux.vnet.ibm.com>
Cc: linuxppc-dev@...ts.ozlabs.org, linux-kernel@...r.kernel.org
Subject: [PATCH v1 02/10] powerpc/kernel/iommu: Align size for IOMMU_PAGE_SIZE on iommu_*_coherent()

Both iommu_alloc_coherent() and iommu_free_coherent() assume that once
size is aligned to PAGE_SIZE it will also be aligned to IOMMU_PAGE_SIZE,
which is not guaranteed.

Update both functions to explicitly align the requested size with
IOMMU_PAGE_ALIGN() before calling iommu_alloc() / iommu_free().

Also, in iommu_range_alloc(), replace ALIGN(n, 1 << tbl->it_page_shift)
with IOMMU_PAGE_ALIGN(n, tbl), which is easier to read.
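
To make the arithmetic concrete, here is a minimal userspace sketch (not
kernel code) of the problem; the 4 KB system page and 64 KB IOMMU page
sizes, and the align_up() helper, are assumptions chosen only for
illustration; the real values come from PAGE_SIZE and tbl->it_page_shift:

#include <stdio.h>

static unsigned long align_up(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	unsigned long page_size = 1UL << 12;	/* assumed PAGE_SIZE: 4 KB */
	unsigned long io_page_size = 1UL << 16;	/* assumed IOMMU page: 64 KB */
	unsigned long size = 6000;		/* requested allocation size */

	/* What the current code relies on: PAGE_SIZE alignment only. */
	unsigned long page_aligned = align_up(size, page_size);	/* 8192 */

	/* What IOMMU_PAGE_ALIGN(size, tbl) guarantees. */
	unsigned long io_aligned = align_up(size, io_page_size);	/* 65536 */

	printf("PAGE_ALIGN(%lu) = %lu (not a multiple of %lu)\n",
	       size, page_aligned, io_page_size);
	printf("IOMMU_PAGE_ALIGN(%lu) = %lu -> nio_pages = %lu\n",
	       size, io_aligned, io_aligned >> 16);
	return 0;
}

In the opposite case (IOMMU page size smaller than PAGE_SIZE), deriving
nio_pages from IOMMU_PAGE_ALIGN(size_io, tbl) instead of PAGE_ALIGN(size)
also avoids requesting more IOMMU pages than the allocation needs.
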
Signed-off-by: Leonardo Bras <leobras.c@...il.com>
---
 arch/powerpc/kernel/iommu.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 9704f3f76e63..d7086087830f 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -237,10 +237,9 @@ static unsigned long iommu_range_alloc(struct device *dev,
 	}
 
 	if (dev)
-		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
-				      1 << tbl->it_page_shift);
+		boundary_size = IOMMU_PAGE_ALIGN(dma_get_seg_boundary(dev) + 1, tbl);
 	else
-		boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
+		boundary_size = IOMMU_PAGE_ALIGN(1UL << 32, tbl);
 	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
 
 	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
@@ -858,6 +857,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 	unsigned int order;
 	unsigned int nio_pages, io_order;
 	struct page *page;
+	size_t size_io = size;
 
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
@@ -884,8 +884,9 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 	memset(ret, 0, size);
 
 	/* Set up tces to cover the allocated range */
-	nio_pages = size >> tbl->it_page_shift;
-	io_order = get_iommu_order(size, tbl);
+	size_io = IOMMU_PAGE_ALIGN(size_io, tbl);
+	nio_pages = size_io >> tbl->it_page_shift;
+	io_order = get_iommu_order(size_io, tbl);
 	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
 			      mask >> tbl->it_page_shift, io_order, 0);
 	if (mapping == DMA_MAPPING_ERROR) {
@@ -900,11 +901,11 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
 			 void *vaddr, dma_addr_t dma_handle)
 {
 	if (tbl) {
-		unsigned int nio_pages;
+		size_t size_io = IOMMU_PAGE_ALIGN(size, tbl);
+		unsigned int nio_pages = size_io >> tbl->it_page_shift;
 
-		size = PAGE_ALIGN(size);
-		nio_pages = size >> tbl->it_page_shift;
 		iommu_free(tbl, dma_handle, nio_pages);
+
 		size = PAGE_ALIGN(size);
 		free_pages((unsigned long)vaddr, get_order(size));
 	}
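
For reference, this is how iommu_free_coherent() reads with the last hunk
applied (reconstructed from the diff above; the comments are added here
for explanation only):

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		/* Free the TCEs using the IOMMU-page-aligned size... */
		size_t size_io = IOMMU_PAGE_ALIGN(size, tbl);
		unsigned int nio_pages = size_io >> tbl->it_page_shift;

		iommu_free(tbl, dma_handle, nio_pages);

		/* ...but free the backing memory using the system page size. */
		size = PAGE_ALIGN(size);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}
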
--
2.25.4