Message-Id: <20191114145918.235339-5-suzuki.poulose@arm.com>
Date: Thu, 14 Nov 2019 14:59:17 +0000
From: Suzuki K Poulose <suzuki.poulose@....com>
To: linux-arm-kernel@...ts.infradead.org
Cc: linux-kernel@...r.kernel.org, james.morse@....com, will@...nel.org,
catalin.marinas@....com, mark.rutland@....com, maz@...nel.org,
suzuki.poulose@....com
Subject: [PATCH 4/5] KVM: arm64: Workaround Cortex-A77 erratum 1542418 on VMID rollover
From: James Morse <james.morse@....com>

Cortex-A77's erratum 1542418 workaround needs to be applied for VMID
re-use too. This prevents the CPU from predicting a modified branch
based on a previous user of the VMID and ASID.

KVM doesn't use force_vm_exit or exit_vm_noop for anything other than
VMID rollover. Rename them, and use this path to invoke the VMID
workaround on each CPU.

Another case where the VMID and ASID may get reused is if the system is
over-provisioned and two vCPUs of the same VM are scheduled on one
physical CPU. KVM invalidates the TLB to prevent ASID sharing in this
case; invoke the ASID-rollover workaround there too, so that the shared
ASID cannot trip the erratum.

Signed-off-by: James Morse <james.morse@....com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@....com>
---
 arch/arm/include/asm/kvm_mmu.h   |  5 +++++
 arch/arm64/include/asm/kvm_mmu.h | 15 +++++++++++++++
 virt/kvm/arm/arm.c               | 20 ++++++++++++++------
 3 files changed, 34 insertions(+), 6 deletions(-)
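
For reviewers who want the two call sites side by side, below is a
stand-alone sketch of the control flow this patch sets up. It is not
part of the patch: kvm_call_hyp(), smp_call_function_many() and the
arm64 workaround are stubbed out with prints, vcpu_load() is a condensed
stand-in for kvm_arch_vcpu_load(), and only the helper names from this
series are reused.

/* Toy model only: kernel internals are stubbed, the flow follows the patch. */
#include <stdio.h>
#include <stdbool.h>

static bool cpu_affected = true;	/* stand-in for cpus_have_const_cap() */

static void kvm_workaround_1542418_vmid_rollover(void)
{
	if (!cpu_affected)
		return;
	printf("    apply erratum 1542418 ASID-rollover workaround\n");
}

/* Path 1: VMID rollover.  update_vmid() broadcasts to every CPU. */
static void force_vmid_rollover_exit(void)
{
	/* smp_call_function_many() would run the hook on the other CPUs */
	printf("rollover: force a guest exit on all CPUs\n");
	kvm_workaround_1542418_vmid_rollover();	/* ...and on this CPU */
}

/* Path 2: two vCPUs of one VM take turns on the same physical CPU. */
static void vcpu_load(int *last_ran, int vcpu_id)
{
	if (*last_ran != vcpu_id) {
		printf("vcpu %d loaded: local TLB flush for this VMID\n", vcpu_id);
		kvm_workaround_1542418_vmid_rollover();
		*last_ran = vcpu_id;
	}
}

int main(void)
{
	int last_ran = -1;

	force_vmid_rollover_exit();	/* VMID generation bumped */
	vcpu_load(&last_ran, 0);	/* first vCPU on this physical CPU */
	vcpu_load(&last_ran, 1);	/* different vCPU, same VM, same pCPU */
	vcpu_load(&last_ran, 1);	/* same vCPU again: nothing to do */
	return 0;
}

Built with a plain C compiler, it prints the workaround firing once for
the rollover broadcast and once when a different vCPU of the same VM is
loaded on the same physical CPU, but not when the same vCPU is loaded
again.
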
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 0d84d50bf9ba..8a5702e0c3f8 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -430,6 +430,11 @@ static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
	return kvm_phys_to_vttbr(baddr) | vmid_field;
}
+static inline void kvm_workaround_1542418_vmid_rollover(void)
+{
+	/* not affected */
+}
+
#endif /* !__ASSEMBLY__ */
#endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index befe37d4bc0e..5776e53c296d 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -9,6 +9,7 @@
#include <asm/page.h>
#include <asm/memory.h>
+#include <asm/mmu_context.h>
#include <asm/cpufeature.h>
/*
@@ -603,5 +604,19 @@ static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}
+static inline void kvm_workaround_1542418_vmid_rollover(void)
+{
+	unsigned long flags;
+
+	if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1542418) ||
+	    !cpus_have_const_cap(ARM64_WORKAROUND_1542418))
+		return;
+
+	local_irq_save(flags);
+	arm64_workaround_1542418_asid_rollover();
+	local_irq_restore(flags);
+
+}
+
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 86c6aa1cb58e..ac9e017df7c9 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -368,6 +368,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	 */
	if (*last_ran != vcpu->vcpu_id) {
		kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
+
+		/*
+		 * 'last_ran' and this vcpu may share an ASID and hit the
+		 * conditions for Cortex-A77 erratum 1542418.
+		 */
+		kvm_workaround_1542418_vmid_rollover();
+
		*last_ran = vcpu->vcpu_id;
	}
@@ -458,15 +465,16 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
	return vcpu_mode_priv(vcpu);
}
-/* Just ensure a guest exit from a particular CPU */
-static void exit_vm_noop(void *info)
+static void exit_vmid_rollover(void *info)
{
+	kvm_workaround_1542418_vmid_rollover();
}
-void force_vm_exit(const cpumask_t *mask)
+static void force_vmid_rollover_exit(const cpumask_t *mask)
{
	preempt_disable();
-	smp_call_function_many(mask, exit_vm_noop, NULL, true);
+	smp_call_function_many(mask, exit_vmid_rollover, NULL, true);
+	kvm_workaround_1542418_vmid_rollover();
	preempt_enable();
}
@@ -518,10 +526,10 @@ static void update_vmid(struct kvm_vmid *vmid)
		/*
		 * On SMP we know no other CPUs can use this CPU's or each
-		 * other's VMID after force_vm_exit returns since the
+		 * other's VMID after force_vmid_rollover_exit returns since the
		 * kvm_vmid_lock blocks them from reentry to the guest.
		 */
-		force_vm_exit(cpu_all_mask);
+		force_vmid_rollover_exit(cpu_all_mask);
		/*
		 * Now broadcast TLB + ICACHE invalidation over the inner
		 * shareable domain to make sure all data structures are
--
2.23.0