Message-Id: <20200227084608.18223-12-rppt@kernel.org>
Date: Thu, 27 Feb 2020 10:46:05 +0200
From: Mike Rapoport <rppt@...nel.org>
To: linux-kernel@...r.kernel.org
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Arnd Bergmann <arnd@...db.de>,
Benjamin Herrenschmidt <benh@...nel.crashing.org>,
Brian Cain <bcain@...eaurora.org>,
Catalin Marinas <catalin.marinas@....com>,
Christophe Leroy <christophe.leroy@....fr>,
Fenghua Yu <fenghua.yu@...el.com>,
Geert Uytterhoeven <geert+renesas@...der.be>,
Guan Xuetao <gxt@....edu.cn>,
James Morse <james.morse@....com>,
Jonas Bonn <jonas@...thpole.se>,
Julien Thierry <julien.thierry.kdev@...il.com>,
Ley Foon Tan <ley.foon.tan@...el.com>,
Marc Zyngier <maz@...nel.org>,
Michael Ellerman <mpe@...erman.id.au>,
Paul Mackerras <paulus@...ba.org>,
Rich Felker <dalias@...c.org>,
Russell King <linux@...linux.org.uk>,
Stafford Horne <shorne@...il.com>,
Stefan Kristiansson <stefan.kristiansson@...nalahti.fi>,
Suzuki K Poulose <suzuki.poulose@....com>,
Tony Luck <tony.luck@...el.com>, Will Deacon <will@...nel.org>,
Yoshinori Sato <ysato@...rs.sourceforge.jp>,
kvmarm@...ts.cs.columbia.edu, kvm-ppc@...r.kernel.org,
linux-arch@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
linux-hexagon@...r.kernel.org, linux-ia64@...r.kernel.org,
linux-mm@...ck.org, linuxppc-dev@...ts.ozlabs.org,
linux-sh@...r.kernel.org, nios2-dev@...ts.rocketboards.org,
openrisc@...ts.librecores.org,
uclinux-h8-devel@...ts.sourceforge.jp,
Mike Rapoport <rppt@...nel.org>,
Mike Rapoport <rppt@...ux.ibm.com>
Subject: [PATCH v3 11/14] sh: add support for folded p4d page tables
From: Mike Rapoport <rppt@...ux.ibm.com>
Implement the primitives necessary for 4th level folding, add walks of the
p4d level where appropriate, and remove the use of __ARCH_USE_5LEVEL_HACK.
Signed-off-by: Mike Rapoport <rppt@...ux.ibm.com>
---
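For reference, and not part of the patch itself: a minimal sketch, assuming a
generic kernel context, of the walk pattern this conversion targets. The
helper name walk_to_pte() is made up for illustration; with
<asm-generic/pgtable-nopud.h> included, the p4d and pud levels are folded, so
the extra p4d step compiles down to nothing on sh.

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Hypothetical helper: descend pgd -> p4d -> pud -> pmd -> pte for addr. */
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);	/* folded level: returns pgd recast */
	if (p4d_none_or_clear_bad(p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none_or_clear_bad(pmd))
		return NULL;

	return pte_offset_kernel(pmd, addr);
}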
arch/sh/include/asm/pgtable-2level.h |  1 -
arch/sh/include/asm/pgtable-3level.h |  1 -
arch/sh/kernel/io_trapped.c          |  7 ++++++-
arch/sh/mm/cache-sh4.c               |  4 +++-
arch/sh/mm/cache-sh5.c               |  7 ++++++-
arch/sh/mm/fault.c                   | 26 +++++++++++++++++++++++---
arch/sh/mm/hugetlbpage.c             | 28 ++++++++++++++++++----------
arch/sh/mm/init.c                    |  9 ++++++++-
arch/sh/mm/kmap.c                    |  2 +-
arch/sh/mm/tlbex_32.c                |  6 +++++-
arch/sh/mm/tlbex_64.c                |  7 ++++++-
11 files changed, 76 insertions(+), 22 deletions(-)
diff --git a/arch/sh/include/asm/pgtable-2level.h b/arch/sh/include/asm/pgtable-2level.h
index bf1eb51c3ee5..08bff93927ff 100644
--- a/arch/sh/include/asm/pgtable-2level.h
+++ b/arch/sh/include/asm/pgtable-2level.h
@@ -2,7 +2,6 @@
#ifndef __ASM_SH_PGTABLE_2LEVEL_H
#define __ASM_SH_PGTABLE_2LEVEL_H
-#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
/*
diff --git a/arch/sh/include/asm/pgtable-3level.h b/arch/sh/include/asm/pgtable-3level.h
index 779260b721ca..0f80097e5c9c 100644
--- a/arch/sh/include/asm/pgtable-3level.h
+++ b/arch/sh/include/asm/pgtable-3level.h
@@ -2,7 +2,6 @@
#ifndef __ASM_SH_PGTABLE_3LEVEL_H
#define __ASM_SH_PGTABLE_3LEVEL_H
-#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
/*
diff --git a/arch/sh/kernel/io_trapped.c b/arch/sh/kernel/io_trapped.c
index 60c828a2b8a2..037aab2708b7 100644
--- a/arch/sh/kernel/io_trapped.c
+++ b/arch/sh/kernel/io_trapped.c
@@ -136,6 +136,7 @@ EXPORT_SYMBOL_GPL(match_trapped_io_handler);
static struct trapped_io *lookup_tiop(unsigned long address)
{
pgd_t *pgd_k;
+ p4d_t *p4d_k;
pud_t *pud_k;
pmd_t *pmd_k;
pte_t *pte_k;
@@ -145,7 +146,11 @@ static struct trapped_io *lookup_tiop(unsigned long address)
if (!pgd_present(*pgd_k))
return NULL;
- pud_k = pud_offset(pgd_k, address);
+ p4d_k = p4d_offset(pgd_k, address);
+ if (!p4d_present(*p4d_k))
+ return NULL;
+
+ pud_k = pud_offset(p4d_k, address);
if (!pud_present(*pud_k))
return NULL;
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index eee911422cf9..45943bcb7042 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -209,6 +209,7 @@ static void sh4_flush_cache_page(void *args)
unsigned long address, pfn, phys;
int map_coherent = 0;
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -224,7 +225,8 @@ static void sh4_flush_cache_page(void *args)
return;
pgd = pgd_offset(vma->vm_mm, address);
- pud = pud_offset(pgd, address);
+ p4d = p4d_offset(pgd, address);
+ pud = pud_offset(p4d, address);
pmd = pmd_offset(pud, address);
pte = pte_offset_kernel(pmd, address);
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index 445b5e69b73c..442a77cc2957 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -383,6 +383,7 @@ static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
unsigned long addr, unsigned long end)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -397,7 +398,11 @@ static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
if (pgd_bad(*pgd))
return;
- pud = pud_offset(pgd, addr);
+ p4d = p4d_offset(pgd, addr);
+ if (p4d_none(*p4d) || p4d_bad(*p4d))
+ return;
+
+ pud = pud_offset(p4d, addr);
if (pud_none(*pud) || pud_bad(*pud))
return;
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index a2b0275413e8..ebd30003fd06 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -53,6 +53,7 @@ static void show_pte(struct mm_struct *mm, unsigned long addr)
(u64)pgd_val(*pgd));
do {
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -65,7 +66,20 @@ static void show_pte(struct mm_struct *mm, unsigned long addr)
break;
}
- pud = pud_offset(pgd, addr);
+ p4d = p4d_offset(pgd, addr);
+ if (PTRS_PER_P4D != 1)
+ pr_cont(", *p4d=%0*Lx", (u32)(sizeof(*p4d) * 2),
+ (u64)p4d_val(*p4d));
+
+ if (p4d_none(*p4d))
+ break;
+
+ if (p4d_bad(*p4d)) {
+ pr_cont("(bad)");
+ break;
+ }
+
+ pud = pud_offset(p4d, addr);
if (PTRS_PER_PUD != 1)
pr_cont(", *pud=%0*llx", (u32)(sizeof(*pud) * 2),
(u64)pud_val(*pud));
@@ -107,6 +121,7 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
unsigned index = pgd_index(address);
pgd_t *pgd_k;
+ p4d_t *p4d, *p4d_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
@@ -116,8 +131,13 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
if (!pgd_present(*pgd_k))
return NULL;
- pud = pud_offset(pgd, address);
- pud_k = pud_offset(pgd_k, address);
+ p4d = p4d_offset(pgd, address);
+ p4d_k = p4d_offset(pgd_k, address);
+ if (!p4d_present(*p4d_k))
+ return NULL;
+
+ pud = pud_offset(p4d, address);
+ pud_k = pud_offset(p4d_k, address);
if (!pud_present(*pud_k))
return NULL;
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index 960deb1f24a1..acd5652a0de3 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -26,17 +26,21 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte = NULL;
pgd = pgd_offset(mm, addr);
if (pgd) {
- pud = pud_alloc(mm, pgd, addr);
- if (pud) {
- pmd = pmd_alloc(mm, pud, addr);
- if (pmd)
- pte = pte_alloc_map(mm, pmd, addr);
+ p4d = p4d_alloc(mm, pgd, addr);
+ if (p4d) {
+ pud = pud_alloc(mm, p4d, addr);
+ if (pud) {
+ pmd = pmd_alloc(mm, pud, addr);
+ if (pmd)
+ pte = pte_alloc_map(mm, pmd, addr);
+ }
}
}
@@ -47,17 +51,21 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte = NULL;
pgd = pgd_offset(mm, addr);
if (pgd) {
- pud = pud_offset(pgd, addr);
- if (pud) {
- pmd = pmd_offset(pud, addr);
- if (pmd)
- pte = pte_offset_map(pmd, addr);
+ p4d = p4d_offset(pgd, addr);
+ if (p4d) {
+ pud = pud_offset(p4d, addr);
+ if (pud) {
+ pmd = pmd_offset(pud, addr);
+ if (pmd)
+ pte = pte_offset_map(pmd, addr);
+ }
}
}
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 4bab79baee75..594203530d43 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -45,6 +45,7 @@ void __init __weak plat_mem_setup(void)
static pte_t *__get_pte_phys(unsigned long addr)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
@@ -54,7 +55,13 @@ static pte_t *__get_pte_phys(unsigned long addr)
return NULL;
}
- pud = pud_alloc(NULL, pgd, addr);
+ p4d = p4d_alloc(NULL, pgd, addr);
+ if (unlikely(!p4d)) {
+ p4d_ERROR(*p4d);
+ return NULL;
+ }
+
+ pud = pud_alloc(NULL, p4d, addr);
if (unlikely(!pud)) {
pud_ERROR(*pud);
return NULL;
diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c
index 9e6b38b03cf7..0e7039137f5a 100644
--- a/arch/sh/mm/kmap.c
+++ b/arch/sh/mm/kmap.c
@@ -15,7 +15,7 @@
#include <asm/cacheflush.h>
#define kmap_get_fixmap_pte(vaddr) \
- pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
+ pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr)), vaddr)
static pte_t *kmap_coherent_pte;
diff --git a/arch/sh/mm/tlbex_32.c b/arch/sh/mm/tlbex_32.c
index 382262dc0c4b..1c53868632ee 100644
--- a/arch/sh/mm/tlbex_32.c
+++ b/arch/sh/mm/tlbex_32.c
@@ -23,6 +23,7 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -42,7 +43,10 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long error_code,
pgd = pgd_offset(current->mm, address);
}
- pud = pud_offset(pgd, address);
+ p4d = p4d_offset(pgd, address);
+ if (p4d_none_or_clear_bad(p4d))
+ return 1;
+ pud = pud_offset(p4d, address);
if (pud_none_or_clear_bad(pud))
return 1;
pmd = pmd_offset(pud, address);
diff --git a/arch/sh/mm/tlbex_64.c b/arch/sh/mm/tlbex_64.c
index 8ff966dd0c74..0d015f7556fa 100644
--- a/arch/sh/mm/tlbex_64.c
+++ b/arch/sh/mm/tlbex_64.c
@@ -44,6 +44,7 @@ static int handle_tlbmiss(unsigned long long protection_flags,
unsigned long address)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -58,7 +59,11 @@ static int handle_tlbmiss(unsigned long long protection_flags,
pgd = pgd_offset(current->mm, address);
}
- pud = pud_offset(pgd, address);
+ p4d = p4d_offset(pgd, address);
+ if (p4d_none(*p4d) || !p4d_present(*p4d))
+ return 1;
+
+ pud = pud_offset(p4d, address);
if (pud_none(*pud) || !pud_present(*pud))
return 1;
--
2.24.0