Message-Id: <20201215073705.123786-11-suravee.suthikulpanit@amd.com>
Date: Tue, 15 Dec 2020 01:37:02 -0600
From: Suravee Suthikulpanit <suravee.suthikulpanit@....com>
To: linux-kernel@...r.kernel.org, iommu@...ts.linux-foundation.org
Cc: joro@...tes.org, will@...nel.org,
Suravee Suthikulpanit <suravee.suthikulpanit@....com>
Subject: [PATCH v4 10/13] iommu/amd: Refactor fetch_pte to use struct amd_io_pgtable
Refactor fetch_pte() to take a struct amd_io_pgtable instead of a struct protection_domain, which simplifies the function. There is no functional change.
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@....com>
---
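Note (not part of the commit message): a minimal sketch of the caller-side
pattern after this change, using only identifiers that already appear in the
hunks below. Callers now resolve the struct amd_io_pgtable from the domain's
io_pgtable ops and pass it to fetch_pte():

	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long pte_pgsize;
	u64 *pte;

	/* Walk the domain's I/O page table for the PTE mapping 'iova' */
	pte = fetch_pte(pgtable, iova, &pte_pgsize);
	if (!pte || !IOMMU_PTE_PRESENT(*pte))
		return 0;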
drivers/iommu/amd/amd_iommu.h | 2 +-
drivers/iommu/amd/io_pgtable.c | 13 +++++++------
drivers/iommu/amd/iommu.c | 4 +++-
3 files changed, 11 insertions(+), 8 deletions(-)
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 76276d9e463c..83ca822c5349 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -143,7 +143,7 @@ extern int iommu_map_page(struct protection_domain *dom,
extern unsigned long iommu_unmap_page(struct protection_domain *dom,
unsigned long bus_addr,
unsigned long page_size);
-extern u64 *fetch_pte(struct protection_domain *domain,
+extern u64 *fetch_pte(struct amd_io_pgtable *pgtable,
unsigned long address,
unsigned long *page_size);
extern void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 35dd9153e6b7..87184b6cee0f 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -317,7 +317,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
* This function checks if there is a PTE for a given dma address. If
* there is one, it returns the pointer to it.
*/
-u64 *fetch_pte(struct protection_domain *domain,
+u64 *fetch_pte(struct amd_io_pgtable *pgtable,
unsigned long address,
unsigned long *page_size)
{
@@ -326,11 +326,11 @@ u64 *fetch_pte(struct protection_domain *domain,
*page_size = 0;
- if (address > PM_LEVEL_SIZE(domain->iop.mode))
+ if (address > PM_LEVEL_SIZE(pgtable->mode))
return NULL;
- level = domain->iop.mode - 1;
- pte = &domain->iop.root[PM_LEVEL_INDEX(level, address)];
+ level = pgtable->mode - 1;
+ pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
*page_size = PTE_LEVEL_PAGE_SIZE(level);
while (level > 0) {
@@ -465,6 +465,8 @@ unsigned long iommu_unmap_page(struct protection_domain *dom,
unsigned long iova,
unsigned long size)
{
+ struct io_pgtable_ops *ops = &dom->iop.iop.ops;
+ struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
unsigned long long unmapped;
unsigned long unmap_size;
u64 *pte;
@@ -474,8 +476,7 @@ unsigned long iommu_unmap_page(struct protection_domain *dom,
unmapped = 0;
while (unmapped < size) {
- pte = fetch_pte(dom, iova, &unmap_size);
-
+ pte = fetch_pte(pgtable, iova, &unmap_size);
if (pte) {
int i, count;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 2963a37b7c16..76f61dd6b89f 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2100,13 +2100,15 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
dma_addr_t iova)
{
struct protection_domain *domain = to_pdomain(dom);
+ struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+ struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
unsigned long offset_mask, pte_pgsize;
u64 *pte, __pte;
if (domain->iop.mode == PAGE_MODE_NONE)
return iova;
- pte = fetch_pte(domain, iova, &pte_pgsize);
+ pte = fetch_pte(pgtable, iova, &pte_pgsize);
if (!pte || !IOMMU_PTE_PRESENT(*pte))
return 0;
--
2.17.1