Message-ID: <20240207174102.1486130-7-pasha.tatashin@soleen.com>
Date: Wed, 7 Feb 2024 17:40:58 +0000
From: Pasha Tatashin <pasha.tatashin@...een.com>
To: akpm@...ux-foundation.org,
alim.akhtar@...sung.com,
alyssa@...enzweig.io,
asahi@...ts.linux.dev,
baolu.lu@...ux.intel.com,
bhelgaas@...gle.com,
cgroups@...r.kernel.org,
corbet@....net,
david@...hat.com,
dwmw2@...radead.org,
hannes@...xchg.org,
heiko@...ech.de,
iommu@...ts.linux.dev,
jernej.skrabec@...il.com,
jonathanh@...dia.com,
joro@...tes.org,
krzysztof.kozlowski@...aro.org,
linux-doc@...r.kernel.org,
linux-fsdevel@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
linux-rockchip@...ts.infradead.org,
linux-samsung-soc@...r.kernel.org,
linux-sunxi@...ts.linux.dev,
linux-tegra@...r.kernel.org,
lizefan.x@...edance.com,
marcan@...can.st,
mhiramat@...nel.org,
m.szyprowski@...sung.com,
pasha.tatashin@...een.com,
paulmck@...nel.org,
rdunlap@...radead.org,
robin.murphy@....com,
samuel@...lland.org,
suravee.suthikulpanit@....com,
sven@...npeter.dev,
thierry.reding@...il.com,
tj@...nel.org,
tomas.mudrunka@...il.com,
vdumpa@...dia.com,
wens@...e.org,
will@...nel.org,
yu-cheng.yu@...el.com,
rientjes@...gle.com,
bagasdotme@...il.com,
mkoutny@...e.com
Subject: [PATCH v4 06/10] iommu/rockchip: use page allocation function provided by iommu-pages.h
Convert iommu/rockchip-iommu.c to use the new page allocation functions
provided in iommu-pages.h.
Signed-off-by: Pasha Tatashin <pasha.tatashin@...een.com>
Acked-by: David Rientjes <rientjes@...gle.com>
Tested-by: Bagas Sanjaya <bagasdotme@...il.com>
---
drivers/iommu/rockchip-iommu.c | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
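
Note for reviewers unfamiliar with the new header: iommu_alloc_page() and
iommu_free_page() are intended as drop-in replacements for the
get_zeroed_page()/free_page() pairs they replace, with page accounting added
on top. The snippet below is only a minimal sketch of that idea under stated
assumptions; it is not the actual iommu-pages.h implementation, and the use
of NR_SECONDARY_PAGETABLE as the accounting counter is an assumption made
purely for illustration.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmstat.h>

/* Sketch: allocate one zeroed page for an IOMMU page table and account it. */
static inline void *iommu_alloc_page(gfp_t gfp)
{
	void *virt = (void *)get_zeroed_page(gfp);

	if (virt)
		mod_node_page_state(page_pgdat(virt_to_page(virt)),
				    NR_SECONDARY_PAGETABLE, 1);
	return virt;
}

/* Sketch: unaccount and free a page previously obtained from iommu_alloc_page(). */
static inline void iommu_free_page(void *virt)
{
	if (!virt)
		return;

	mod_node_page_state(page_pgdat(virt_to_page(virt)),
			    NR_SECONDARY_PAGETABLE, -1);
	free_page((unsigned long)virt);
}

With helpers shaped like this, the conversion below is mechanical: each
get_zeroed_page()/free_page() call site keeps the same gfp flags and error
handling, and only the allocation/free functions change.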
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 2685861c0a12..e04f22d481d0 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -26,6 +26,8 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include "iommu-pages.h"
+
/** MMU register offsets */
#define RK_MMU_DTE_ADDR 0x00 /* Directory table address */
#define RK_MMU_STATUS 0x04
@@ -727,14 +729,14 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
if (rk_dte_is_pt_valid(dte))
goto done;
- page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | rk_ops->gfp_flags);
+ page_table = iommu_alloc_page(GFP_ATOMIC | rk_ops->gfp_flags);
if (!page_table)
return ERR_PTR(-ENOMEM);
pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dma_dev, pt_dma)) {
dev_err(dma_dev, "DMA mapping error while allocating page table\n");
- free_page((unsigned long)page_table);
+ iommu_free_page(page_table);
return ERR_PTR(-ENOMEM);
}
@@ -1061,7 +1063,7 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
* Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
* Allocate one 4 KiB page for each table.
*/
- rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | rk_ops->gfp_flags);
+ rk_domain->dt = iommu_alloc_page(GFP_KERNEL | rk_ops->gfp_flags);
if (!rk_domain->dt)
goto err_free_domain;
@@ -1083,7 +1085,7 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
return &rk_domain->domain;
err_free_dt:
- free_page((unsigned long)rk_domain->dt);
+ iommu_free_page(rk_domain->dt);
err_free_domain:
kfree(rk_domain);
@@ -1104,13 +1106,13 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
u32 *page_table = phys_to_virt(pt_phys);
dma_unmap_single(dma_dev, pt_phys,
SPAGE_SIZE, DMA_TO_DEVICE);
- free_page((unsigned long)page_table);
+ iommu_free_page(page_table);
}
}
dma_unmap_single(dma_dev, rk_domain->dt_dma,
SPAGE_SIZE, DMA_TO_DEVICE);
- free_page((unsigned long)rk_domain->dt);
+ iommu_free_page(rk_domain->dt);
kfree(rk_domain);
}
--
2.43.0.594.gd9cf4e227d-goog