Message-Id: <20231123065708.91345-9-luxu.kernel@bytedance.com>
Date: Thu, 23 Nov 2023 14:57:05 +0800
From: Xu Lu <luxu.kernel@...edance.com>
To: paul.walmsley@...ive.com, palmer@...belt.com,
aou@...s.berkeley.edu, ardb@...nel.org, anup@...infault.org,
atishp@...shpatra.org
Cc: dengliang.1214@...edance.com, xieyongji@...edance.com,
lihangjing@...edance.com, songmuchun@...edance.com,
punit.agrawal@...edance.com, linux-kernel@...r.kernel.org,
linux-riscv@...ts.infradead.org, Xu Lu <luxu.kernel@...edance.com>
Subject: [RFC PATCH V1 08/11] riscv: Apply Svnapot for base page mapping

The Svnapot extension on RISC-V is similar to the contiguous PTE
feature on ARM64. It allows PTEs of a naturally aligned power-of-2
(NAPOT) memory range to be encoded in the same format, saving TLB
space.

This commit applies Svnapot to each base page's mapping. It is the
key to achieving the performance benefit of a larger base page size.
Signed-off-by: Xu Lu <luxu.kernel@...edance.com>
---
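Not for inclusion in the commit message: below is a minimal user-space
sketch of the NAPOT encoding that __pte_mknapot() in this patch
performs. The _PAGE_PFN_SHIFT and _PAGE_NAPOT values mirror the RISC-V
pgtable-bits.h definitions; the PFN used in main() is a hypothetical
test value, and mknapot() is a simplified stand-in for the kernel
helper.

#include <stdint.h>
#include <stdio.h>

#define _PAGE_PFN_SHIFT 10              /* PFN starts at PTE bit 10 */
#define _PAGE_NAPOT     (1ULL << 63)    /* Svnapot: bit 63 marks a NAPOT PTE */

#define BIT(n)          (1ULL << (n))
#define GENMASK(h, l)   (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

/* Encode a NAPOT mapping of 2^order contiguous hardware pages. */
static uint64_t mknapot(uint64_t pteval, unsigned int order)
{
        int pos = order - 1 + _PAGE_PFN_SHIFT;
        uint64_t napot_bit = BIT(pos);
        uint64_t napot_mask = ~GENMASK(pos, _PAGE_PFN_SHIFT);

        /* The low 'order' PFN bits become 0b10..0, encoding the range size. */
        return (pteval & napot_mask) | napot_bit | _PAGE_NAPOT;
}

int main(void)
{
        /* Hypothetical PFN, 16-page aligned as a 64KiB NAPOT range requires. */
        uint64_t pte = 0x12340ULL << _PAGE_PFN_SHIFT;

        printf("napot pte: 0x%016llx\n",
               (unsigned long long)mknapot(pte, 4));
        return 0;
}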
arch/riscv/include/asm/pgtable.h | 34 +++++++++++++++++++++++++++-----
1 file changed, 29 insertions(+), 5 deletions(-)
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 56366f07985d..803dc5fb6314 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -230,6 +230,16 @@ static __always_inline unsigned long __pte_napot(unsigned long pteval)
return pteval & _PAGE_NAPOT;
}
+static __always_inline unsigned long __pte_mknapot(unsigned long pteval,
+ unsigned int order)
+{
+ int pos = order - 1 + _PAGE_PFN_SHIFT;
+ unsigned long napot_bit = BIT(pos);
+ unsigned long napot_mask = ~GENMASK(pos, _PAGE_PFN_SHIFT);
+
+ return (pteval & napot_mask) | napot_bit | _PAGE_NAPOT;
+}
+
static inline pte_t __pte(unsigned long pteval)
{
pte_t pte;
@@ -348,13 +358,11 @@ static inline unsigned long pte_napot(pte_t pte)
return __pte_napot(pte_val(pte));
}
-static inline pte_t pte_mknapot(pte_t pte, unsigned int order)
+static inline pte_t pte_mknapot(pte_t pte, unsigned int page_order)
{
- int pos = order - 1 + _PAGE_PFN_SHIFT;
- unsigned long napot_bit = BIT(pos);
- unsigned long napot_mask = ~GENMASK(pos, _PAGE_PFN_SHIFT);
+ unsigned int hw_page_order = page_order + (PAGE_SHIFT - HW_PAGE_SHIFT);
- return __pte((pte_val(pte) & napot_mask) | napot_bit | _PAGE_NAPOT);
+ return __pte(__pte_mknapot(pte_val(pte), hw_page_order));
}
#else
@@ -366,6 +374,11 @@ static inline unsigned long pte_napot(pte_t pte)
return 0;
}
+static inline pte_t pte_mknapot(pte_t pte, unsigned int page_order)
+{
+ return pte;
+}
+
#endif /* CONFIG_RISCV_ISA_SVNAPOT */
/* Yields the page frame number (PFN) of a page table entry */
@@ -585,6 +598,17 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
*/
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
+ unsigned long order;
+
+ /*
+ * has_svnapot() always returns false before riscv_isa is initialized.
+ */
+ if (has_svnapot() && pte_present(pteval) && !pte_napot(pteval)) {
+ for_each_napot_order(order) {
+ if (napot_cont_shift(order) == PAGE_SHIFT)
+ pteval = pte_mknapot(pteval, order);
+ }
+ }
*ptep = pteval;
}
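Not part of the patch: a stand-alone sketch of the order translation
in pte_mknapot() and the check that set_pte() performs above, assuming
PAGE_SHIFT = 16 (the 64KiB software base page this series targets) and
HW_PAGE_SHIFT = 12 (4KiB hardware PTEs, introduced earlier in the
series); napot_cont_shift() is a simplified stand-in for the kernel
helper.

#include <assert.h>

#define PAGE_SHIFT      16      /* software base page: 64KiB (assumed) */
#define HW_PAGE_SHIFT   12      /* hardware page: 4KiB (assumed) */

/* A NAPOT range of 2^order hardware pages spans (order + 12) bits. */
static unsigned int napot_cont_shift(unsigned int order)
{
        return order + HW_PAGE_SHIFT;
}

int main(void)
{
        /* One software base page (page_order == 0) ... */
        unsigned int hw_page_order = 0 + (PAGE_SHIFT - HW_PAGE_SHIFT);

        /* ... is a NAPOT range of 2^4 = 16 hardware pages, which is
         * exactly the order that the set_pte() loop searches for. */
        assert(napot_cont_shift(hw_page_order) == PAGE_SHIFT);
        return 0;
}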
--
2.20.1