Message-Id: <20181016170530.202609682@linuxfoundation.org>
Date: Tue, 16 Oct 2018 19:06:04 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Marc Zyngier <marc.zyngier@....com>,
Russell King <rmk+kernel@...linux.org.uk>,
Tony Lindgren <tony@...mide.com>,
"David A. Long" <dave.long@...aro.org>
Subject: [PATCH 4.14 096/109] ARM: KVM: invalidate icache on guest exit for Cortex-A15

4.14-stable review patch. If anyone has any objections, please let me know.

------------------

From: Marc Zyngier <marc.zyngier@....com>

Commit 0c47ac8cd157727e7a532d665d6fb1b5fd333977 upstream.

In order to avoid aliasing attacks against the branch predictor
on Cortex-A15, let's invalidate the BTB on guest exit, which can
only be done by invalidating the icache (with ACTLR[0] being set).
We use the same hack as for A12/A17 to perform the vector decoding.
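
For reference, "invalidating the icache" here means a write to the CP15
ICIALLU register followed by an ISB, as the new vector stub does below.
A minimal sketch in C with GCC inline assembly (the wrapper name is
illustrative only, not a kernel symbol):

	/*
	 * Sketch only: invalidate the entire instruction cache
	 * (ICIALLU).  On Cortex-A15 with ACTLR[0] set, this also
	 * invalidates the BTB, which is the property this patch
	 * relies on.
	 */
	static inline void icache_inval_all(void)
	{
		/* ICIALLU: CP15 c7/c5/0, written value is ignored */
		asm volatile("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));
		asm volatile("isb" : : : "memory");
	}
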
Signed-off-by: Marc Zyngier <marc.zyngier@....com>
Signed-off-by: Russell King <rmk+kernel@...linux.org.uk>
Boot-tested-by: Tony Lindgren <tony@...mide.com>
Reviewed-by: Tony Lindgren <tony@...mide.com>
Signed-off-by: David A. Long <dave.long@...aro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
 arch/arm/include/asm/kvm_mmu.h |    5 +++++
 arch/arm/kvm/hyp/hyp-entry.S   |   24 ++++++++++++++++++++++++
 2 files changed, 29 insertions(+)

--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -255,6 +255,11 @@ static inline void *kvm_get_hyp_vector(v
 		return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
 	}

+	case ARM_CPU_PART_CORTEX_A15:
+	{
+		extern char __kvm_hyp_vector_ic_inv[];
+		return kvm_ksym_ref(__kvm_hyp_vector_ic_inv);
+	}
 #endif
 	default:
 	{

--- a/arch/arm/kvm/hyp/hyp-entry.S
+++ b/arch/arm/kvm/hyp/hyp-entry.S
@@ -73,6 +73,28 @@ __kvm_hyp_vector:

 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 	.align 5
+__kvm_hyp_vector_ic_inv:
+	.global __kvm_hyp_vector_ic_inv
+
+	/*
+	 * We encode the exception entry in the bottom 3 bits of
+	 * SP, and we have to guarantee to be 8 bytes aligned.
+	 */
+	W(add)	sp, sp, #1	/* Reset		7 */
+	W(add)	sp, sp, #1	/* Undef		6 */
+	W(add)	sp, sp, #1	/* Syscall		5 */
+	W(add)	sp, sp, #1	/* Prefetch abort	4 */
+	W(add)	sp, sp, #1	/* Data abort		3 */
+	W(add)	sp, sp, #1	/* HVC			2 */
+	W(add)	sp, sp, #1	/* IRQ			1 */
+	W(nop)			/* FIQ			0 */
+
+	mcr	p15, 0, r0, c7, c5, 0	/* ICIALLU */
+	isb
+
+	b	decode_vectors
+
+	.align 5
 __kvm_hyp_vector_bp_inv:
 	.global __kvm_hyp_vector_bp_inv

@@ -92,6 +114,8 @@ __kvm_hyp_vector_bp_inv:
 	mcr	p15, 0, r0, c7, c5, 6	/* BPIALL */
 	isb

+decode_vectors:
+
 #ifdef CONFIG_THUMB2_KERNEL
 	/*
 	 * Yet another silly hack: Use VPIDR as a temp register.
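
The fall-through encoding in __kvm_hyp_vector_ic_inv can be modelled in
plain C.  This is an illustrative sketch, not kernel code: each vector
slot holds one 4-byte instruction, so an exception taken through slot N
executes the remaining 7 - N "add sp, sp, #1" slots before reaching the
common code, leaving 7 - N in the low 3 bits of the 8-byte-aligned SP
for decode_vectors to read back and clear.

	#include <stdio.h>

	int main(void)
	{
		/* 8-byte-aligned SP: the low 3 bits are free to carry data */
		unsigned long sp = 0x1000;
		const char *name[] = { "Reset", "Undef", "Syscall",
				       "Prefetch abort", "Data abort",
				       "HVC", "IRQ", "FIQ" };

		for (int n = 0; n < 8; n++) {
			/* each add slot fallen through bumps SP by 1 */
			unsigned long enc = sp + (7 - n);
			printf("%-14s -> SP & 7 == %lu\n", name[n], enc & 7);
		}
		return 0;
	}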