Message-Id: <1589249606-27177-1-git-send-email-anshuman.khandual@arm.com>
Date: Tue, 12 May 2020 07:43:26 +0530
From: Anshuman Khandual <anshuman.khandual@....com>
To: linux-arm-kernel@...ts.infradead.org
Cc: Anshuman Khandual <anshuman.khandual@....com>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>, Marc Zyngier <maz@...nel.org>,
James Morse <james.morse@....com>,
linux-kernel@...r.kernel.org, kvmarm@...ts.cs.columbia.edu
Subject: [PATCH] arm64/cpufeature: Add ID_AA64MMFR0_PARANGE_MASK
This replaces multiple open encodings of the PARange field mask (0x7) with a
new ID_AA64MMFR0_PARANGE_MASK define, thus cleaning up the clutter. It
modifies an existing ID_AA64MMFR0 helper and introduces a new one, i.e.
id_aa64mmfr0_iparange() and id_aa64mmfr0_parange() respectively.
Cc: Catalin Marinas <catalin.marinas@....com>
Cc: Will Deacon <will@...nel.org>
Cc: Marc Zyngier <maz@...nel.org>
Cc: James Morse <james.morse@....com>
Cc: linux-arm-kernel@...ts.infradead.org
Cc: linux-kernel@...r.kernel.org
Cc: kvmarm@...ts.cs.columbia.edu
Signed-off-by: Anshuman Khandual <anshuman.khandual@....com>
---
This applies after (https://patchwork.kernel.org/patch/11541893/).
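
For reference, below is a minimal standalone sketch (illustration only, not
part of the patch) of how the two helpers are meant to split the work:
id_aa64mmfr0_parange() merely extracts the raw ID_AA64MMFR0_EL1.PARange
field, while id_aa64mmfr0_iparange() maps that field to a physical address
width in bits. The userspace types, the sample register value and the
fallback for unknown encodings are assumptions made for the sketch.

#include <stdint.h>
#include <stdio.h>

#define ID_AA64MMFR0_PARANGE_MASK	0x7

/* Extract the raw PARange field from an ID_AA64MMFR0_EL1 value */
static inline uint32_t id_aa64mmfr0_parange(uint64_t mmfr0)
{
	return mmfr0 & ID_AA64MMFR0_PARANGE_MASK;
}

/* Convert the PARange field into a physical address width in bits */
static inline uint32_t id_aa64mmfr0_iparange(uint64_t mmfr0)
{
	switch (id_aa64mmfr0_parange(mmfr0)) {
	case 0: return 32;
	case 1: return 36;
	case 2: return 40;
	case 3: return 42;
	case 4: return 44;
	case 5: return 48;
	case 6: return 52;
	default: return 0;	/* unknown/reserved encoding (sketch only) */
	}
}

int main(void)
{
	uint64_t mmfr0 = 0x5;	/* made-up register value: PARange == 5 */

	printf("PARange field : %u\n", (unsigned)id_aa64mmfr0_parange(mmfr0));
	printf("PA width      : %u bits\n", (unsigned)id_aa64mmfr0_iparange(mmfr0));
	return 0;
}

Keeping the field extraction and the field-to-width conversion in separate
helpers lets kvm_arm_setup_stage2() consume the raw field for VTCR_EL2.PS
while the IPA limit code consumes the converted width, without open coding
the 0x7 mask in either place.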
 arch/arm64/include/asm/cpufeature.h | 11 ++++++++++-
 arch/arm64/kernel/cpufeature.c      |  5 ++---
 arch/arm64/kvm/reset.c              |  9 +++++----
 3 files changed, 17 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 1291ad5a9ccb..320cfc5b6025 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -706,8 +706,17 @@ void arm64_set_ssbd_mitigation(bool state);
 
 extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
 
-static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
+#define ID_AA64MMFR0_PARANGE_MASK	0x7
+
+static inline u32 id_aa64mmfr0_parange(u64 mmfr0)
 {
+	return mmfr0 & ID_AA64MMFR0_PARANGE_MASK;
+}
+
+static inline u32 id_aa64mmfr0_iparange(u64 mmfr0)
+{
+	int parange = id_aa64mmfr0_parange(mmfr0);
+
 	switch (parange) {
 	case 0: return 32;
 	case 1: return 36;
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 30917fe7942a..2c62f7c64a3c 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2185,7 +2185,7 @@ static void verify_sve_features(void)
 void verify_hyp_capabilities(void)
 {
 	u64 safe_mmfr1, mmfr0, mmfr1;
-	int parange, ipa_max;
+	int ipa_max;
 	unsigned int safe_vmid_bits, vmid_bits;
 
 	safe_mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
@@ -2201,8 +2201,7 @@ void verify_hyp_capabilities(void)
 	}
 
 	/* Verify IPA range */
-	parange = mmfr0 & 0x7;
-	ipa_max = id_aa64mmfr0_parange_to_phys_shift(parange);
+	ipa_max = id_aa64mmfr0_iparange(mmfr0);
 	if (ipa_max < get_kvm_ipa_limit()) {
 		pr_crit("CPU%d: IPA range mismatch\n", smp_processor_id());
 		cpu_die_early();
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 841b492ff334..2e4da75d79ea 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -347,10 +347,10 @@ u32 get_kvm_ipa_limit(void)
 
 void kvm_set_ipa_limit(void)
 {
-	unsigned int ipa_max, pa_max, va_max, parange;
+	unsigned int ipa_max, pa_max, va_max;
 
-	parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 0x7;
-	pa_max = id_aa64mmfr0_parange_to_phys_shift(parange);
+	pa_max = id_aa64mmfr0_iparange(read_sanitised_ftr_reg
+				(SYS_ID_AA64MMFR0_EL1));
 
 	/* Clamp the IPA limit to the PA size supported by the kernel */
 	ipa_max = (pa_max > PHYS_MASK_SHIFT) ? PHYS_MASK_SHIFT : pa_max;
@@ -411,7 +411,8 @@ int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
 		phys_shift = KVM_PHYS_SHIFT;
 	}
 
-	parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 7;
+	parange = id_aa64mmfr0_parange(read_sanitised_ftr_reg
+				(SYS_ID_AA64MMFR0_EL1));
 	if (parange > ID_AA64MMFR0_PARANGE_MAX)
 		parange = ID_AA64MMFR0_PARANGE_MAX;
 	vtcr |= parange << VTCR_EL2_PS_SHIFT;
--
2.20.1