Message-Id: <20220126173011.3476262-8-ardb@kernel.org>
Date: Wed, 26 Jan 2022 18:30:06 +0100
From: Ard Biesheuvel <ardb@...nel.org>
To: linux-arm-kernel@...ts.infradead.org
Cc: kvmarm@...ts.cs.columbia.edu, linux-hardening@...r.kernel.org,
Ard Biesheuvel <ardb@...nel.org>,
Will Deacon <will@...nel.org>, Marc Zyngier <maz@...nel.org>,
Fuad Tabba <tabba@...gle.com>,
Quentin Perret <qperret@...gle.com>,
Mark Rutland <mark.rutland@....com>,
James Morse <james.morse@....com>,
Catalin Marinas <catalin.marinas@....com>
Subject: [RFC PATCH 07/12] arm64: mm: remap PTE level user page tables r/o in the linear region

Now that all PTE manipulations for user space tables go via the fixmap,
we can remap these tables read-only in the linear region so they cannot
be corrupted inadvertently.

Signed-off-by: Ard Biesheuvel <ardb@...nel.org>
---
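Note: page_tables_are_ro(), set_pgtable_ro() and set_pgtable_rw() are
not defined by this patch; they are introduced by earlier patches in
this series. Purely as a sketch of the intent (the helper bodies below
are assumed, not taken from the series), they could be modelled on the
generic set_memory_ro()/set_memory_rw() primitives, toggling the
protection of the single linear-map page that holds a page table:

	#include <linux/set_memory.h>

	/* Sketch only: toggle the linear-map alias of one table page. */
	static inline void set_pgtable_ro(void *addr)
	{
		/* addr is the page-aligned linear-map address of a table */
		set_memory_ro((unsigned long)addr, 1);
	}

	static inline void set_pgtable_rw(void *addr)
	{
		set_memory_rw((unsigned long)addr, 1);
	}

One caveat with this naive form: arm64's set_memory_*() has
historically only accepted vmalloc addresses, so the real helpers
presumably adjust the linear map permissions directly.
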
 arch/arm64/include/asm/pgalloc.h |  5 +++++
 arch/arm64/include/asm/tlb.h     |  2 ++
 arch/arm64/mm/mmu.c              | 23 +++++++++++++++++++++++
 3 files changed, 30 insertions(+)

diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 63f9ae9e96fe..18a5bb0c9ee4 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -18,10 +18,15 @@
 #define __HAVE_ARCH_PUD_FREE
 #define __HAVE_ARCH_PMD_ALLOC_ONE
 #define __HAVE_ARCH_PMD_FREE
+#define __HAVE_ARCH_PTE_ALLOC_ONE
+#define __HAVE_ARCH_PTE_FREE
 #include <asm-generic/pgalloc.h>
 
 #define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
 
+pgtable_t pte_alloc_one(struct mm_struct *mm);
+void pte_free(struct mm_struct *mm, struct page *pte_page);
+
 #if CONFIG_PGTABLE_LEVELS > 2
 
 pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr);
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 0f54fbb59bba..e69a44160cce 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -75,6 +75,8 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				  unsigned long addr)
 {
+	if (page_tables_are_ro())
+		set_pgtable_rw(page_address(pte));
 	pgtable_pte_page_dtor(pte);
 	tlb_remove_table(tlb, pte);
 }
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e55d91a5f1ed..949846654797 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1686,3 +1686,26 @@ void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 	free_page((u64)pmd);
 }
 #endif
+
+pgtable_t pte_alloc_one(struct mm_struct *mm)
+{
+	pgtable_t pgt = __pte_alloc_one(mm, GFP_PGTABLE_USER);
+
+	VM_BUG_ON(mm == &init_mm);
+
+	if (!pgt)
+		return NULL;
+	if (page_tables_are_ro())
+		set_pgtable_ro(page_address(pgt));
+	return pgt;
+}
+
+void pte_free(struct mm_struct *mm, struct page *pte_page)
+{
+	VM_BUG_ON(mm == &init_mm);
+
+	if (page_tables_are_ro())
+		set_pgtable_rw(page_address(pte_page));
+	pgtable_pte_page_dtor(pte_page);
+	__free_page(pte_page);
+}
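
Note: the premise of the commit message is that, after earlier patches
in this series, every PTE store for user space tables goes through a
transient writable fixmap window rather than through the linear-map
alias, which is what makes it safe to remap that alias read-only here.
A minimal sketch of such a store, reusing the existing FIX_PTE fixmap
slot and the generic set_fixmap_offset()/clear_fixmap() helpers (the
function name and the choice of slot are assumptions for illustration,
and locking against concurrent users of the slot is ignored):

	/* Sketch only: write a PTE via a short-lived writable window. */
	static void fixmap_set_pte(pte_t *ptep, pte_t pte)
	{
		pte_t *window;

		/* map the page holding ptep at the FIX_PTE slot, r/w */
		window = (pte_t *)set_fixmap_offset(FIX_PTE, __pa(ptep));
		WRITE_ONCE(*window, pte);
		dsb(ishst);		/* publish before unmapping */
		clear_fixmap(FIX_PTE);	/* tear the window down again */
	}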
--
2.30.2