Message-Id: <20230307140522.2311461-46-ardb@kernel.org>
Date: Tue, 7 Mar 2023 15:05:07 +0100
From: Ard Biesheuvel <ardb@...nel.org>
To: linux-kernel@...r.kernel.org
Cc: linux-arm-kernel@...ts.infradead.org,
Ard Biesheuvel <ardb@...nel.org>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>, Marc Zyngier <maz@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Ryan Roberts <ryan.roberts@....com>,
Anshuman Khandual <anshuman.khandual@....com>,
Kees Cook <keescook@...omium.org>
Subject: [PATCH v3 45/60] arm64: mm: Wire up TCR.DS bit to PTE shareability fields

When LPA2 is enabled, bits 8 and 9 of page and block descriptors become
part of the output address instead of carrying shareability attributes
for the region in question.

So avoid setting these bits if TCR.DS == 1, which means LPA2 is enabled.

Signed-off-by: Ard Biesheuvel <ardb@...nel.org>
---
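For reviewers, not part of the patch: a minimal standalone sketch of how
descriptor bits [9:8] end up being used with and without LPA2, with the
PTE_SHARED/PTE_AF values mirroring the kernel's definitions. The make_pte()
helper, the address mask and the example address are simplified stand-ins
for illustration only, not the kernel's actual pte construction code.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_TYPE_PAGE   (3ULL << 0)     /* valid page descriptor */
#define PTE_SHARED      (3ULL << 8)     /* SH[1:0] == inner shareable (only without LPA2) */
#define PTE_AF          (1ULL << 10)    /* access flag */

/* Hypothetical helper: build a level 3 descriptor for physical address 'pa' */
static uint64_t make_pte(uint64_t pa, bool lpa2)
{
        uint64_t pte = PTE_TYPE_PAGE | PTE_AF;

        /* OA[49:12] goes into descriptor bits [49:12] (layout simplified) */
        pte |= pa & 0x0003fffffffff000ULL;

        if (lpa2) {
                /* with LPA2, bits [9:8] carry OA[51:50] - no room for SH */
                pte |= ((pa >> 50) & 0x3) << 8;
        } else {
                /* without LPA2, bits [9:8] carry the shareability field */
                pte |= PTE_SHARED;
        }
        return pte;
}

int main(void)
{
        /* example 52-bit PA (not representable without LPA2), OA[51:50] == 0b10 */
        uint64_t pa = 0x000800000dead000ULL;

        printf("!lpa2: %016" PRIx64 "\n", make_pte(pa, false));
        printf(" lpa2: %016" PRIx64 "\n", make_pte(pa, true));
        return 0;
}

The two results differ only in bits [9:8]. With TCR.DS set, the shareability
attribute is taken from the TCR_ELx.SH0/SH1 fields instead (if I am reading
the Arm ARM correctly), which is why the patch can simply clear
PTE_SHARED/PMD_SECT_S rather than move them elsewhere.
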
arch/arm64/Kconfig | 4 ++++
arch/arm64/include/asm/pgtable-prot.h | 18 ++++++++++++++++--
arch/arm64/mm/mmap.c | 4 ++++
3 files changed, 24 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1023e896d46b8969..d287dad29198c843 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1287,6 +1287,10 @@ config ARM64_PA_BITS
default 48 if ARM64_PA_BITS_48
default 52 if ARM64_PA_BITS_52
+config ARM64_LPA2
+ def_bool y
+ depends on ARM64_PA_BITS_52 && !ARM64_64K_PAGES
+
choice
prompt "Endianness"
default CPU_LITTLE_ENDIAN
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 9b165117a454595a..269584d5a2c017fc 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -40,6 +40,20 @@ extern bool arm64_use_ng_mappings;
#define PTE_MAYBE_NG (arm64_use_ng_mappings ? PTE_NG : 0)
#define PMD_MAYBE_NG (arm64_use_ng_mappings ? PMD_SECT_NG : 0)
+#ifndef CONFIG_ARM64_LPA2
+#define lpa2_is_enabled() false
+#define PTE_MAYBE_SHARED PTE_SHARED
+#define PMD_MAYBE_SHARED PMD_SECT_S
+#else
+static inline bool __pure lpa2_is_enabled(void)
+{
+ return read_tcr() & TCR_DS;
+}
+
+#define PTE_MAYBE_SHARED (lpa2_is_enabled() ? 0 : PTE_SHARED)
+#define PMD_MAYBE_SHARED (lpa2_is_enabled() ? 0 : PMD_SECT_S)
+#endif
+
/*
* If we have userspace only BTI we don't want to mark kernel pages
* guarded even if the system does support BTI.
@@ -50,8 +64,8 @@ extern bool arm64_use_ng_mappings;
#define PTE_MAYBE_GP 0
#endif
-#define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG)
-#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
+#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_MAYBE_NG | PTE_MAYBE_SHARED | PTE_AF)
+#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_MAYBE_NG | PMD_MAYBE_SHARED | PMD_SECT_AF)
#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 8f5b7ce857ed4a8f..adcf547f74eb8e60 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -73,6 +73,10 @@ static int __init adjust_protection_map(void)
protection_map[VM_EXEC | VM_SHARED] = PAGE_EXECONLY;
}
+ if (lpa2_is_enabled())
+ for (int i = 0; i < ARRAY_SIZE(protection_map); i++)
+ pgprot_val(protection_map[i]) &= ~PTE_SHARED;
+
return 0;
}
arch_initcall(adjust_protection_map);
--
2.39.2