Message-Id: <20200414213114.2378-7-will@kernel.org>
Date: Tue, 14 Apr 2020 22:31:12 +0100
From: Will Deacon <will@...nel.org>
To: linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.cs.columbia.edu
Cc: linux-kernel@...r.kernel.org, Will Deacon <will@...nel.org>,
Suzuki K Poulose <suzuki.poulose@....com>,
Mark Rutland <mark.rutland@....com>,
Marc Zyngier <maz@...nel.org>,
Anshuman Khandual <anshuman.khandual@....com>,
Catalin Marinas <catalin.marinas@....com>,
Sai Prakash Ranjan <saiprakash.ranjan@...eaurora.org>,
Doug Anderson <dianders@...omium.org>, kernel-team@...roid.com
Subject: [PATCH 6/8] arm64: cpufeature: Relax AArch32 system checks if EL1 is 64-bit only

If AArch32 is not supported at EL1, the AArch32 feature register fields
no longer advertise support for some system features:

  * ISAR4.SMC
  * PFR1.{Virt_frac, Sec_frac, Virtualization, Security, ProgMod}

In which case, we don't need to emit "SANITY CHECK" failures for all of
them.

Add logic to relax the strictness of individual feature register fields
at runtime and use this for the fields above if 32-bit EL1 is not
supported.

Signed-off-by: Will Deacon <will@...nel.org>
---
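For reference, the relaxation works because the cross-CPU sanity check only
compares the bits that remain set in a register's strict_mask; once a field's
bits have been cleared from that mask, a mismatch in that field is no longer
reported. The stand-alone sketch below is an illustration only (it is not
kernel code; the register values and the 4-bit field at shift 16 are made up):

  #include <stdint.h>
  #include <stdio.h>

  /* Bits [shift + width - 1 : shift], in the spirit of arm64_ftr_mask(). */
  #define FIELD_MASK(shift, width) ((((uint64_t)1 << (width)) - 1) << (shift))

  int main(void)
  {
          uint64_t boot = 0x11000;                /* value on the boot CPU  */
          uint64_t secondary = 0x01000;           /* value on a later CPU   */
          uint64_t strict_mask = ~(uint64_t)0;    /* all fields checked     */

          /* Field still strict: the differing bits show up (0x10000). */
          printf("before relax: %#llx\n",
                 (unsigned long long)((boot ^ secondary) & strict_mask));

          /* "Relax" the made-up 4-bit field at shift 16. */
          strict_mask &= ~FIELD_MASK(16, 4);

          /* The same difference is now ignored (prints 0). */
          printf("after relax:  %#llx\n",
                 (unsigned long long)((boot ^ secondary) & strict_mask));
          return 0;
  }

relax_cpu_ftr_reg() below does the equivalent for a real field: it walks
regp->ftr_bits until it finds the entry whose shift matches and clears
arm64_ftr_mask(ftrp) out of regp->strict_mask.
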
arch/arm64/include/asm/cpufeature.h | 7 ++++++
arch/arm64/kernel/cpufeature.c | 33 ++++++++++++++++++++++++++++-
2 files changed, 39 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index afe08251ff95..f5c4672e498b 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -551,6 +551,13 @@ static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
 		cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1;
 }
 
+static inline bool id_aa64pfr0_32bit_el1(u64 pfr0)
+{
+	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_SHIFT);
+
+	return val == ID_AA64PFR0_EL1_32BIT_64BIT;
+}
+
 static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
 {
 	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 32828a77acc3..9e0321e3e581 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -715,6 +715,25 @@ static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
 	return 1;
 }
 
+static void relax_cpu_ftr_reg(u32 sys_id, int field)
+{
+	const struct arm64_ftr_bits *ftrp;
+	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);
+
+	if (WARN_ON(!regp))
+		return;
+
+	for (ftrp = regp->ftr_bits; ftrp->width; ftrp++) {
+		if (ftrp->shift == field) {
+			regp->strict_mask &= ~arm64_ftr_mask(ftrp);
+			break;
+		}
+	}
+
+	/* Bogus field? */
+	WARN_ON(!ftrp->width);
+}
+
 static int update_32bit_cpu_features(int cpu, struct cpuinfo_arm64 *info,
 				     struct cpuinfo_arm64 *boot)
 {
@@ -729,6 +748,19 @@ static int update_32bit_cpu_features(int cpu, struct cpuinfo_arm64 *info,
 	if (!id_aa64pfr0_32bit_el0(pfr0))
 		return taint;
 
+	/*
+	 * If we don't have AArch32 at EL1, then relax the strictness of
+	 * EL1-dependent register fields to avoid spurious sanity check fails.
+	 */
+	if (!id_aa64pfr0_32bit_el1(pfr0)) {
+		relax_cpu_ftr_reg(SYS_ID_ISAR4_EL1, ID_ISAR4_SMC_SHIFT);
+		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_VIRT_FRAC_SHIFT);
+		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_SEC_FRAC_SHIFT);
+		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_VIRTUALIZATION_SHIFT);
+		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_SECURITY_SHIFT);
+		relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_PROGMOD_SHIFT);
+	}
+
 	taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
 				      info->reg_id_dfr0, boot->reg_id_dfr0);
 	taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
@@ -773,7 +805,6 @@ static int update_32bit_cpu_features(int cpu, struct cpuinfo_arm64 *info,
 	return taint;
 }
 
-
 /*
  * Update system wide CPU feature registers with the values from a
  * non-boot CPU. Also performs SANITY checks to make sure that there
--
2.26.0.110.g2183baf09c-goog