Message-Id: <20191220002914.19043-3-nicoleotsuka@gmail.com>
Date: Thu, 19 Dec 2019 16:29:12 -0800
From: Nicolin Chen <nicoleotsuka@...il.com>
To: thierry.reding@...il.com, joro@...tes.org
Cc: jonathanh@...dia.com, linux-tegra@...r.kernel.org,
iommu@...ts.linux-foundation.org, linux-kernel@...r.kernel.org
Subject: [PATCH 2/4] iommu/tegra-smmu: Do not use PAGE_SHIFT and PAGE_MASK
PAGE_SHIFT and PAGE_MASK are defined according to the page size used
for CPU virtual addresses, so PAGE_SHIFT may be a number other than
12. The tegra-smmu hardware, however, uses fixed 4KB IOVA pages and a
fixed [21:12] bit range for PTE entries.
So this patch replaces all PAGE_SHIFT/PAGE_MASK references with
macros derived from SMMU_PTE_SHIFT instead.
Signed-off-by: Nicolin Chen <nicoleotsuka@...il.com>
---
drivers/iommu/tegra-smmu.c | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
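Note (not part of the patch): a minimal standalone sketch of the problem
this fixes. The PAGE_SHIFT value of 16 below is an assumed example (e.g. an
arm64 configuration built with 64KB pages), and PFN_PHYS is re-defined
locally so the snippet compiles in userspace; both are illustrative only.

	/*
	 * Sketch: with a non-4KB CPU page size, the generic PAGE_SHIFT-based
	 * helper shifts by the wrong amount for the SMMU, whose PTEs always
	 * describe 4KB IOVA pages.
	 */
	#include <stdio.h>

	typedef unsigned long long phys_addr_t;

	#define PAGE_SHIFT		16	/* assumed: 64KB CPU pages */
	#define SMMU_PTE_SHIFT		12	/* SMMU fixed 4KB IOVA pages */

	#define PFN_PHYS(pfn)		((phys_addr_t)(pfn) << PAGE_SHIFT)
	#define SMMU_PFN_PHYS(pfn)	((phys_addr_t)(pfn) << SMMU_PTE_SHIFT)

	int main(void)
	{
		unsigned long pfn = 0x80000;	/* example PFN from a PTE */

		/* With 64KB pages, the generic helper over-shifts by 4 bits. */
		printf("PFN_PHYS:      %#llx\n", PFN_PHYS(pfn));
		printf("SMMU_PFN_PHYS: %#llx\n", SMMU_PFN_PHYS(pfn));
		return 0;
	}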
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 63a147b623e6..5594b47a88bf 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -127,6 +127,11 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12
+#define SMMU_PAGE_MASK (~(SMMU_SIZE_PT-1))
+#define SMMU_OFFSET_IN_PAGE(x) ((unsigned long)(x) & ~SMMU_PAGE_MASK)
+#define SMMU_PFN_PHYS(x) ((phys_addr_t)(x) << SMMU_PTE_SHIFT)
+#define SMMU_PHYS_PFN(x) ((unsigned long)((x) >> SMMU_PTE_SHIFT))
+
#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)
@@ -644,7 +649,7 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
u32 *pte, dma_addr_t pte_dma, u32 val)
{
struct tegra_smmu *smmu = as->smmu;
- unsigned long offset = offset_in_page(pte);
+ unsigned long offset = SMMU_OFFSET_IN_PAGE(pte);
*pte = val;
@@ -680,7 +685,7 @@ static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
pte_attrs |= SMMU_PTE_WRITABLE;
tegra_smmu_set_pte(as, iova, pte, pte_dma,
- __phys_to_pfn(paddr) | pte_attrs);
+ SMMU_PHYS_PFN(paddr) | pte_attrs);
return 0;
}
@@ -716,7 +721,7 @@ static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
pfn = *pte & as->smmu->pfn_mask;
- return PFN_PHYS(pfn);
+ return SMMU_PFN_PHYS(pfn);
}
static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
@@ -1034,7 +1039,8 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
smmu->dev = dev;
smmu->mc = mc;
- smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
+ smmu->pfn_mask =
+ BIT_MASK(mc->soc->num_address_bits - SMMU_PTE_SHIFT) - 1;
dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
mc->soc->num_address_bits, smmu->pfn_mask);
smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
--
2.17.1