Message-Id: <20220126173011.3476262-6-ardb@kernel.org>
Date: Wed, 26 Jan 2022 18:30:04 +0100
From: Ard Biesheuvel <ardb@...nel.org>
To: linux-arm-kernel@...ts.infradead.org
Cc: kvmarm@...ts.cs.columbia.edu, linux-hardening@...r.kernel.org,
Ard Biesheuvel <ardb@...nel.org>,
Will Deacon <will@...nel.org>, Marc Zyngier <maz@...nel.org>,
Fuad Tabba <tabba@...gle.com>,
Quentin Perret <qperret@...gle.com>,
Mark Rutland <mark.rutland@....com>,
James Morse <james.morse@....com>,
Catalin Marinas <catalin.marinas@....com>
Subject: [RFC PATCH 05/12] arm64: mm: remap PUD pages r/o in linear region
Implement the arch-specific PUD alloc/free helpers by wrapping the
generic code, and remapping the backing page read-only in the linear
region on allocation and back to read-write on free.
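
For reference, the generic helper being wrapped is roughly the
following (from include/asm-generic/pgalloc.h at the time of writing);
the page_tables_are_ro(), set_pgtable_ro() and set_pgtable_rw()
helpers used in the hunks below are introduced earlier in this series
and are assumed here to operate on the linear alias of the table page:

  static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr)
  {
          gfp_t gfp = GFP_PGTABLE_USER;

          if (mm == &init_mm)
                  gfp = GFP_PGTABLE_KERNEL;
          /* one zeroed page backs each PUD level table */
          return (pud_t *)get_zeroed_page(gfp);
  }

Since the table occupies exactly one page, the arch wrapper only needs
to flip the permissions of that single page after allocation and before
it is returned to the page allocator.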
Signed-off-by: Ard Biesheuvel <ardb@...nel.org>
---
arch/arm64/include/asm/pgalloc.h | 5 +++++
arch/arm64/include/asm/tlb.h | 2 ++
arch/arm64/mm/mmu.c | 20 ++++++++++++++++++++
3 files changed, 27 insertions(+)
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index d54ac9f8d6c7..737e9f32b199 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -14,6 +14,8 @@
#include <asm/tlbflush.h>
#define __HAVE_ARCH_PGD_FREE
+#define __HAVE_ARCH_PUD_ALLOC_ONE
+#define __HAVE_ARCH_PUD_FREE
#include <asm-generic/pgalloc.h>
#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
@@ -45,6 +47,9 @@ static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
#if CONFIG_PGTABLE_LEVELS > 3
+pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr);
+void pud_free(struct mm_struct *mm, pud_t *pud);
+
static inline void __p4d_populate(p4d_t *p4dp, phys_addr_t pudp, p4dval_t prot)
{
set_p4d(p4dp, __p4d(__phys_to_p4d_val(pudp) | prot));
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index c995d1f4594f..6557626752fc 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -94,6 +94,8 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
unsigned long addr)
{
+ if (page_tables_are_ro())
+ set_pgtable_rw(pudp);
tlb_remove_table(tlb, virt_to_page(pudp));
}
#endif
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index a52c3162beae..03d77c4c3570 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1645,3 +1645,23 @@ static int __init prevent_bootmem_remove_init(void)
}
early_initcall(prevent_bootmem_remove_init);
#endif
+
+#ifndef __PAGETABLE_PUD_FOLDED
+pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ pud_t *pud = __pud_alloc_one(mm, addr);
+
+ if (!pud)
+ return NULL;
+ if (page_tables_are_ro())
+ set_pgtable_ro(pud);
+ return pud;
+}
+
+void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ if (page_tables_are_ro())
+ set_pgtable_rw(pud);
+ free_page((u64)pud);
+}
+#endif
--
2.30.2