Message-Id: <20220413030307.133807-9-heiko@sntech.de>
Date: Wed, 13 Apr 2022 05:03:03 +0200
From: Heiko Stuebner <heiko@...ech.de>
To: palmer@...belt.com, paul.walmsley@...ive.com, aou@...s.berkeley.edu
Cc: linux-riscv@...ts.infradead.org, linux-kernel@...r.kernel.org,
wefu@...hat.com, liush@...winnertech.com, guoren@...nel.org,
atishp@...shpatra.org, anup@...infault.org, drew@...gleboard.org,
hch@....de, arnd@...db.de, wens@...e.org, maxime@...no.tech,
gfavor@...tanamicro.com, andrea.mondelli@...wei.com,
behrensj@....edu, xinhaoqu@...wei.com, mick@....forth.gr,
allen.baum@...erantotech.com, jscheid@...tanamicro.com,
rtrauben@...il.com, samuel@...lland.org, cmuellner@...ux.com,
philipp.tomsich@...ll.eu, Heiko Stuebner <heiko@...ech.de>
Subject: [PATCH v9 08/12] riscv: Fix accessing pfn bits in PTEs for non-32bit variants
On rv32 the PFN part of PTEs is defined to use bits [xlen-1:10],
while on rv64 it is defined to use bits [53:10], leaving [63:54]
as reserved.

With upcoming optional extensions like svpbmt, these previously
reserved bits will be used, so simply right-shifting the PTE
to get the PFN won't be enough.

So introduce a _PAGE_PFN_MASK constant to mask the correct bits
for both rv32 and rv64 before shifting.
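
As an aside (not part of the diff below), here is a minimal standalone
sketch of the mask-then-shift idea for the rv64 layout; the EX_* names,
the helper name and the example PTE value are made up purely for
illustration:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative constants mirroring the rv64 layout described above:
 * the PFN lives in bits [53:10], bits above 53 may carry svpbmt or
 * other flags.
 */
#define EX_PAGE_PFN_SHIFT	10
#define EX_PAGE_PFN_MASK	(((1ULL << 54) - 1) & ~((1ULL << 10) - 1)) /* bits [53:10] */

/* Mask first, then shift -- the same pattern as __page_val_to_pfn() below. */
static uint64_t ex_page_val_to_pfn(uint64_t pte_val)
{
	return (pte_val & EX_PAGE_PFN_MASK) >> EX_PAGE_PFN_SHIFT;
}

int main(void)
{
	/*
	 * Hypothetical PTE: PFN 0x80000, some low permission bits set,
	 * and bit 62 set as a stand-in for an svpbmt memory-type bit.
	 */
	uint64_t pte_val = (0x80000ULL << EX_PAGE_PFN_SHIFT) | 0x0fULL | (1ULL << 62);

	/* A plain shift leaks the high bit into the PFN ... */
	printf("shift only:     0x%llx\n",
	       (unsigned long long)(pte_val >> EX_PAGE_PFN_SHIFT));
	/* ... while masking first recovers the real PFN. */
	printf("mask and shift: 0x%llx\n",
	       (unsigned long long)ex_page_val_to_pfn(pte_val));
	return 0;
}

In the patch itself the masking is centralized in the
__page_val_to_pfn() macro, so pte_pfn(), _pmd_pfn() and the other
helpers all pick it up without further changes.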
Signed-off-by: Heiko Stuebner <heiko@...ech.de>
---
arch/riscv/include/asm/pgtable-32.h | 8 ++++++++
arch/riscv/include/asm/pgtable-64.h | 14 +++++++++++---
arch/riscv/include/asm/pgtable-bits.h | 6 ------
arch/riscv/include/asm/pgtable.h | 8 +++++---
4 files changed, 24 insertions(+), 12 deletions(-)
diff --git a/arch/riscv/include/asm/pgtable-32.h b/arch/riscv/include/asm/pgtable-32.h
index 5b2e79e5bfa5..e266a4fe7f43 100644
--- a/arch/riscv/include/asm/pgtable-32.h
+++ b/arch/riscv/include/asm/pgtable-32.h
@@ -7,6 +7,7 @@
#define _ASM_RISCV_PGTABLE_32_H
#include <asm-generic/pgtable-nopmd.h>
+#include <linux/bits.h>
#include <linux/const.h>
/* Size of region mapped by a page global directory */
@@ -16,4 +17,11 @@
#define MAX_POSSIBLE_PHYSMEM_BITS 34
+/*
+ * rv32 PTE format:
+ * | XLEN-1 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ * PFN reserved for SW D A G U X W R V
+ */
+#define _PAGE_PFN_MASK GENMASK(31, 10)
+
#endif /* _ASM_RISCV_PGTABLE_32_H */
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index 7e246e9f8d70..15f3ad5aee4f 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -6,6 +6,7 @@
#ifndef _ASM_RISCV_PGTABLE_64_H
#define _ASM_RISCV_PGTABLE_64_H
+#include <linux/bits.h>
#include <linux/const.h>
extern bool pgtable_l4_enabled;
@@ -65,6 +66,13 @@ typedef struct {
#define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t))
+/*
+ * rv64 PTE format:
+ * | 63 | 62 61 | 60 54 | 53 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ * N MT RSV PFN reserved for SW D A G U X W R V
+ */
+#define _PAGE_PFN_MASK GENMASK(53, 10)
+
static inline int pud_present(pud_t pud)
{
return (pud_val(pud) & _PAGE_PRESENT);
@@ -108,12 +116,12 @@ static inline unsigned long _pud_pfn(pud_t pud)
static inline pmd_t *pud_pgtable(pud_t pud)
{
- return (pmd_t *)pfn_to_virt(pud_val(pud) >> _PAGE_PFN_SHIFT);
+ return (pmd_t *)pfn_to_virt(__page_val_to_pfn(pud_val(pud)));
}
static inline struct page *pud_page(pud_t pud)
{
- return pfn_to_page(pud_val(pud) >> _PAGE_PFN_SHIFT);
+ return pfn_to_page(__page_val_to_pfn(pud_val(pud)));
}
#define mm_p4d_folded mm_p4d_folded
@@ -143,7 +151,7 @@ static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot)
static inline unsigned long _pmd_pfn(pmd_t pmd)
{
- return pmd_val(pmd) >> _PAGE_PFN_SHIFT;
+ return __page_val_to_pfn(pmd_val(pmd));
}
#define mk_pmd(page, prot) pfn_pmd(page_to_pfn(page), prot)
diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h
index a6b0c89824c2..e571fa954afc 100644
--- a/arch/riscv/include/asm/pgtable-bits.h
+++ b/arch/riscv/include/asm/pgtable-bits.h
@@ -6,12 +6,6 @@
#ifndef _ASM_RISCV_PGTABLE_BITS_H
#define _ASM_RISCV_PGTABLE_BITS_H
-/*
- * PTE format:
- * | XLEN-1 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
- * PFN reserved for SW D A G U X W R V
- */
-
#define _PAGE_ACCESSED_OFFSET 6
#define _PAGE_PRESENT (1 << 0)
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 046b44225623..faba543e2b08 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -108,6 +108,8 @@
#include <asm/tlbflush.h>
#include <linux/mm_types.h>
+#define __page_val_to_pfn(_val) (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)
+
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#else
@@ -261,12 +263,12 @@ static inline unsigned long _pgd_pfn(pgd_t pgd)
static inline struct page *pmd_page(pmd_t pmd)
{
- return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
+ return pfn_to_page(__page_val_to_pfn(pmd_val(pmd)));
}
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
- return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
+ return (unsigned long)pfn_to_virt(__page_val_to_pfn(pmd_val(pmd)));
}
static inline pte_t pmd_pte(pmd_t pmd)
@@ -282,7 +284,7 @@ static inline pte_t pud_pte(pud_t pud)
/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
- return (pte_val(pte) >> _PAGE_PFN_SHIFT);
+ return __page_val_to_pfn(pte_val(pte));
}
#define pte_page(x) pfn_to_page(pte_pfn(x))
--
2.35.1