Message-ID: <20201126121121.301768209@infradead.org>
Date: Thu, 26 Nov 2020 13:01:19 +0100
From: Peter Zijlstra <peterz@...radead.org>
To: kan.liang@...ux.intel.com, mingo@...nel.org, acme@...nel.org,
mark.rutland@....com, alexander.shishkin@...ux.intel.com,
jolsa@...hat.com, eranian@...gle.com
Cc: christophe.leroy@...roup.eu, npiggin@...il.com,
linuxppc-dev@...ts.ozlabs.org, mpe@...erman.id.au, will@...nel.org,
willy@...radead.org, aneesh.kumar@...ux.ibm.com,
sparclinux@...r.kernel.org, davem@...emloft.net,
catalin.marinas@....com, linux-arch@...r.kernel.org,
linux-kernel@...r.kernel.org, ak@...ux.intel.com,
dave.hansen@...el.com, kirill.shutemov@...ux.intel.com,
peterz@...radead.org
Subject: [PATCH v2 5/6] sparc64/mm: Implement pXX_leaf_size() support

Sparc64 supports large pages whose size does not match any page-table
level; wire up the pXX_leaf_size() functions to report the correct
leaf size. This enables PERF_SAMPLE_{DATA,CODE}_PAGE_SIZE to report
accurate page-table leaf sizes.

Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
 arch/sparc/include/asm/pgtable_64.h |   13 +++++++++++++
 arch/sparc/mm/hugetlbpage.c         |   19 +++++++++++++------
 2 files changed, 26 insertions(+), 6 deletions(-)
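
For context: the consumer of these hooks is the generic perf walker
added earlier in this series. A condensed sketch (simplified from
kernel/events/core.c; the pgd/p4d levels and the none/bad checks are
elided here) of how each hook gets asked:

static u64 perf_get_pgtable_size(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep, pte;
	u64 size = 0;

	pudp = pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr);
	pud = READ_ONCE(*pudp);
	if (pud_leaf(pud))
		return pud_leaf_size(pud);	/* e.g. a 16G TTE on sun4v */

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_leaf(pmd))
		return pmd_leaf_size(pmd);	/* e.g. a 4M TTE */

	ptep = pte_offset_map(pmdp, addr);
	pte = READ_ONCE(*ptep);
	if (pte_present(pte))
		size = pte_leaf_size(pte);	/* 8K, or 64K for hugetlb */
	pte_unmap(ptep);

	return size;
}

The real walker runs lockless with IRQs disabled and checks every
level for none/bad entries; the point is only that whichever level
turns out to be a leaf is asked for its size via its pXX_leaf_size()
hook.
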
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -1121,6 +1121,19 @@ extern unsigned long cmdline_memory_size
 asmlinkage void do_sparc64_fault(struct pt_regs *regs);
 
+#ifdef CONFIG_HUGETLB_PAGE
+
+#define pud_leaf_size pud_leaf_size
+extern unsigned long pud_leaf_size(pud_t pud);
+
+#define pmd_leaf_size pmd_leaf_size
+extern unsigned long pmd_leaf_size(pmd_t pmd);
+
+#define pte_leaf_size pte_leaf_size
+extern unsigned long pte_leaf_size(pte_t pte);
+
+#endif /* CONFIG_HUGETLB_PAGE */
+
 #endif /* !(__ASSEMBLY__) */
 #endif /* !(_SPARC64_PGTABLE_H) */
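
(Context for the #define-to-itself idiom above: it is how the generic
header detects the arch override. Absent it, the fallbacks this series
adds to include/linux/pgtable.h derive the size purely from the
page-table level, roughly:

#ifndef pud_leaf_size
#define pud_leaf_size(x) PUD_SIZE
#endif
#ifndef pmd_leaf_size
#define pmd_leaf_size(x) PMD_SIZE
#endif
#ifndef pte_leaf_size
#define pte_leaf_size(x) PAGE_SIZE
#endif

which is precisely what is insufficient on sparc64, where the hardware
TTE, not the page-table level, carries the authoritative mapping size.)
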
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -247,14 +247,17 @@ static unsigned int sun4u_huge_tte_to_sh
 	return shift;
 }
 
-static unsigned int huge_tte_to_shift(pte_t entry)
+static unsigned long tte_to_shift(pte_t entry)
 {
-	unsigned long shift;
-
 	if (tlb_type == hypervisor)
- shift = sun4v_huge_tte_to_shift(entry);
- else
- shift = sun4u_huge_tte_to_shift(entry);
+ return sun4v_huge_tte_to_shift(entry);
+
+ return sun4u_huge_tte_to_shift(entry);
+}
+
+static unsigned int huge_tte_to_shift(pte_t entry)
+{
+	unsigned long shift = tte_to_shift(entry);
 
 	if (shift == PAGE_SHIFT)
 		WARN_ONCE(1, "tto_to_shift: invalid hugepage tte=0x%lx\n",
@@ -272,6 +275,10 @@ static unsigned long huge_tte_to_size(pt
 	return size;
 }
 
+unsigned long pud_leaf_size(pud_t pud) { return 1UL << tte_to_shift((pte_t)pud); }
+unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift((pte_t)pmd); }
+unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift((pte_t)pte); }
+
 pte_t *huge_pte_alloc(struct mm_struct *mm,
 			unsigned long addr, unsigned long sz)
 {
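
(For reference, the shift ultimately comes from the size field encoded
in the hardware TTE; the existing sun4v decoder earlier in this file,
abridged here into direct returns, gives the flavour:

static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
{
	switch (pte_val(entry) & _PAGE_SZALL_4V) {
	case _PAGE_SZ16GB_4V:	return HPAGE_16GB_SHIFT;
	case _PAGE_SZ2GB_4V:	return HPAGE_2GB_SHIFT;
	case _PAGE_SZ256MB_4V:	return HPAGE_256MB_SHIFT;
	case _PAGE_SZ4MB_4V:	return REAL_HPAGE_SHIFT;
	case _PAGE_SZ64K_4V:	return HPAGE_64K_SHIFT;
	default:		return PAGE_SHIFT;
	}
}

Note the default: a regular 8K mapping decodes to PAGE_SHIFT, which is
why pte_leaf_size() above is also correct for non-hugetlb ptes.)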