Message-ID: <20250617095104.6772-3-miko.lenczewski@arm.com>
Date: Tue, 17 Jun 2025 09:51:02 +0000
From: Mikołaj Lenczewski <miko.lenczewski@....com>
To: ryan.roberts@....com,
yang@...amperecomputing.com,
catalin.marinas@....com,
will@...nel.org,
jean-philippe@...aro.org,
robin.murphy@....com,
joro@...tes.org,
maz@...nel.org,
oliver.upton@...ux.dev,
joey.gouly@....com,
james.morse@....com,
broonie@...nel.org,
ardb@...nel.org,
baohua@...nel.org,
suzuki.poulose@....com,
david@...hat.com,
jgg@...pe.ca,
nicolinc@...dia.com,
jsnitsel@...hat.com,
mshavit@...gle.com,
kevin.tian@...el.com,
linux-arm-kernel@...ts.infradead.org,
linux-kernel@...r.kernel.org,
iommu@...ts.linux.dev
Cc: Mikołaj Lenczewski <miko.lenczewski@....com>
Subject: [PATCH v7 2/4] arm64: Add BBM Level 2 cpu feature

The Break-Before-Make cpu feature supports multiple levels (levels 0-2),
and this commit adds a dedicated BBML2 cpufeature against which support
can be tested.

To support BBML2 in as wide a range of contexts as we can, we want not
only the architectural guarantees that BBML2 makes, but additionally
require that BBML2 never generates TLB conflict aborts. Avoiding aborts
spares us from having to prove that no recursive faults can be induced
in any path that uses BBML2, which allows its use for arbitrary kernel
mappings.
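
As a purely illustrative sketch (not part of this patch), a path that
repaints a live kernel block mapping could gate the relaxed sequence on
the new helper as below. repaint_pmd() and its arguments are
hypothetical; set_pmd(), pmd_clear() and flush_tlb_kernel_range() are
the usual kernel helpers, and system_supports_bbml2_noabort() is the
helper added by this patch:

  /* Hypothetical example, not part of this patch. */
  static void repaint_pmd(pmd_t *pmdp, pmd_t new_pmd, unsigned long addr)
  {
          if (system_supports_bbml2_noabort()) {
                  /*
                   * BBML2 (noabort): go directly from one valid entry to
                   * another; stale old-granularity TLB entries are
                   * flushed after the switch.
                   */
                  set_pmd(pmdp, new_pmd);
                  flush_tlb_kernel_range(addr, addr + PMD_SIZE);
          } else {
                  /* Classic break-before-make: break, invalidate, make. */
                  pmd_clear(pmdp);
                  flush_tlb_kernel_range(addr, addr + PMD_SIZE);
                  set_pmd(pmdp, new_pmd);
          }
  }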

This feature builds on the ARM64_CPUCAP_EARLY_LOCAL_CPU_FEATURE type
introduced previously, as all early cpus must support BBML2 for us to
enable it (and any late cpus must also support it in order to be
onlined).

Not onlining late cpus that do not support BBML2 is unavoidable, as we
might already be relying on BBML2 semantics for kernel memory regions by
the time such a cpu comes up. This could cause faults on the late cpus
and would be difficult to unwind, so let us avoid the case altogether.

Signed-off-by: Mikołaj Lenczewski <miko.lenczewski@....com>
---
arch/arm64/include/asm/cpufeature.h | 5 ++++
arch/arm64/kernel/cpufeature.c | 40 +++++++++++++++++++++++++++++
arch/arm64/tools/cpucaps | 1 +
3 files changed, 46 insertions(+)

diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 155ebd040c55..bf13d676aae2 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -871,6 +871,11 @@ static inline bool system_supports_pmuv3(void)
return cpus_have_final_cap(ARM64_HAS_PMUV3);
}
+static inline bool system_supports_bbml2_noabort(void)
+{
+ return alternative_has_cap_unlikely(ARM64_HAS_BBML2_NOABORT);
+}
+
int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index f9c947166322..2e80ff237b96 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2213,6 +2213,41 @@ static bool hvhe_possible(const struct arm64_cpu_capabilities *entry,
return arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_HVHE);
}
+static bool has_bbml2_noabort(const struct arm64_cpu_capabilities *caps, int scope)
+{
+ /*
+ * We want to allow usage of BBML2 in as wide a range of kernel contexts
+ * as possible. This list is therefore an allow-list of known-good
+ * implementations that both support BBML2 and additionally, fulfill the
+ * extra constraint of never generating TLB conflict aborts when using
+ * the relaxed BBML2 semantics (such aborts make use of BBML2 in certain
+ * kernel contexts difficult to prove safe against recursive aborts).
+ *
+ * Note that implementations can only be considered "known-good" if their
+ * implementors attest to the fact that the implementation never raises
+ * TLB conflict aborts for BBML2 mapping granularity changes.
+ */
+ static const struct midr_range supports_bbml2_noabort_list[] = {
+ MIDR_REV_RANGE(MIDR_CORTEX_X4, 0, 3, 0xf),
+ MIDR_REV_RANGE(MIDR_NEOVERSE_V3, 0, 2, 0xf),
+ {}
+ };
+
+ /* Does our cpu guarantee to never raise TLB conflict aborts? */
+ if (!is_midr_in_range_list(supports_bbml2_noabort_list))
+ return false;
+
+ /*
+ * We currently ignore the ID_AA64MMFR2_EL1 register, and only care about
+ * whether the MIDR check passes. This is because we specifically
+ * care only about a stricter form of BBML2 (one guaranteeing noabort),
+ * and so the MMFR2 check is pointless (all implementations passing the
+ * MIDR check should also pass the MMFR2 check).
+ */
+
+ return true;
+}
+
#ifdef CONFIG_ARM64_PAN
static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
{
@@ -2980,6 +3015,11 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_cpuid_feature,
ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, EVT, IMP)
},
+ {
+ .capability = ARM64_HAS_BBML2_NOABORT,
+ .type = ARM64_CPUCAP_EARLY_LOCAL_CPU_FEATURE,
+ .matches = has_bbml2_noabort,
+ },
{
.desc = "52-bit Virtual Addressing for KVM (LPA2)",
.capability = ARM64_HAS_LPA2,
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 10effd4cff6b..2bd2bfaeddcd 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -45,6 +45,7 @@ HAS_LPA2
HAS_LSE_ATOMICS
HAS_MOPS
HAS_NESTED_VIRT
+HAS_BBML2_NOABORT
HAS_PAN
HAS_PMUV3
HAS_S1PIE
--
2.49.0