Message-Id: <20180720121412.359340881@linuxfoundation.org>
Date: Fri, 20 Jul 2018 14:14:20 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org, stable@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Christoffer Dall <christoffer.dall@....com>,
Mark Rutland <mark.rutland@....com>,
Marc Zyngier <marc.zyngier@....com>,
Catalin Marinas <catalin.marinas@....com>
Subject: [PATCH 4.9 63/66] arm64: KVM: Add ARCH_WORKAROUND_2 support for guests
4.9-stable review patch. If anyone has any objections, please let me know.
------------------
From: Marc Zyngier <marc.zyngier@....com>
commit 55e3748e8902ff641e334226bdcb432f9a5d78d3 upstream.
In order to offer ARCH_WORKAROUND_2 support to guests, we need
a bit of infrastructure.
Let's add a flag indicating whether or not the guest uses the
SSBD mitigation. Depending on the state of this flag, allow
KVM to disable ARCH_WORKAROUND_2 before entering the guest,
and re-enable it on exit.
Reviewed-by: Christoffer Dall <christoffer.dall@....com>
Reviewed-by: Mark Rutland <mark.rutland@....com>
Signed-off-by: Marc Zyngier <marc.zyngier@....com>
Signed-off-by: Catalin Marinas <catalin.marinas@....com>
Signed-off-by: Marc Zyngier <marc.zyngier@....com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
arch/arm/include/asm/kvm_mmu.h | 5 +++++
arch/arm/kvm/arm.c | 6 ++++++
arch/arm64/include/asm/kvm_asm.h | 3 +++
arch/arm64/include/asm/kvm_host.h | 3 +++
arch/arm64/include/asm/kvm_mmu.h | 24 ++++++++++++++++++++++++
arch/arm64/kvm/hyp/switch.c | 38 ++++++++++++++++++++++++++++++++++++++
6 files changed, 79 insertions(+)
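In outline, the world switch brackets guest execution with two SMCCC
calls. Below is a minimal sketch of that flow, assuming the
workaround_flags field and VCPU_WORKAROUND_2_FLAG introduced in this
patch; the example_* names are illustrative and not part of the patch,
and the real code in switch.c additionally gates on
cpus_have_cap(ARM64_SSBD) and the per-CPU firmware state:

	#include <linux/arm-smccc.h>
	#include <linux/kvm_host.h>

	/* Illustrative only: compressed view of the entry/exit dance. */
	static void example_world_switch(struct kvm_vcpu *vcpu)
	{
		/*
		 * The host always runs with the workaround enabled;
		 * only transition state when the guest wants it off.
		 */
		bool ssbd_off = !(vcpu->arch.workaround_flags &
				  VCPU_WORKAROUND_2_FLAG);

		if (ssbd_off)
			arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);

		/* ... __guest_enter() runs the guest here ... */

		if (ssbd_off)
			arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
	}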
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -256,6 +256,11 @@ static inline int kvm_map_vectors(void)
return 0;
}
+static inline int hyp_map_aux_data(void)
+{
+ return 0;
+}
+
#endif /* !__ASSEMBLY__ */
#endif /* __ARM_KVM_MMU_H__ */
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1367,6 +1367,12 @@ static int init_hyp_mode(void)
}
}
+ err = hyp_map_aux_data();
+ if (err) {
+ kvm_err("Cannot map host auxiliary data: %d\n", err);
+ goto out_err;
+ }
+
kvm_info("Hyp mode initialized successfully\n");
return 0;
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -33,6 +33,9 @@
#define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
#define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
+#define VCPU_WORKAROUND_2_FLAG_SHIFT 0
+#define VCPU_WORKAROUND_2_FLAG (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
+
/* Translate a kernel address of @sym into its equivalent linear mapping */
#define kvm_ksym_ref(sym) \
({ \
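VCPU_WORKAROUND_2_FLAG occupies bit 0 of the per-vcpu workaround_flags
word added to kvm_host.h below. A hedged sketch of how the bit is
meant to be used follows; these example_* helpers are hypothetical,
as the accessors that actually let a guest flip the state arrive later
in the series:

	/* Illustrative helpers only; not part of this patch. */
	static inline bool example_ssbd_mitigated(struct kvm_vcpu *vcpu)
	{
		/* Bit set: the guest wants the mitigation left on. */
		return vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG;
	}

	static inline void example_set_ssbd(struct kvm_vcpu *vcpu, bool on)
	{
		if (on)
			vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
		else
			vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
	}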
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -213,6 +213,9 @@ struct kvm_vcpu_arch {
/* Exception Information */
struct kvm_vcpu_fault_info fault;
+ /* State of various workarounds, see kvm_asm.h for bit assignment */
+ u64 workaround_flags;
+
/* Guest debug state */
u64 debug_flags;
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -387,5 +387,29 @@ static inline int kvm_map_vectors(void)
}
#endif
+#ifdef CONFIG_ARM64_SSBD
+DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
+
+static inline int hyp_map_aux_data(void)
+{
+ int cpu, err;
+
+ for_each_possible_cpu(cpu) {
+ u64 *ptr;
+
+ ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
+ err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+#else
+static inline int hyp_map_aux_data(void)
+{
+ return 0;
+}
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
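arm64_ssbd_callback_required is per-CPU because, on heterogeneous
systems, firmware may need the callback on some CPUs and not others;
mapping each CPU's copy with create_hyp_mappings() is what makes it
readable at EL2. A hedged sketch of the consumer pattern, mirroring
the real call sites in switch.c below (example_* is illustrative):

	/* Illustrative only: hyp code consulting the mapped flag. */
	static void __hyp_text example_ssbd_off_at_el2(void)
	{
		/*
		 * Skip the SMC entirely on CPUs whose firmware needs
		 * no callback; __hyp_this_cpu_read() relies on the
		 * mapping created by hyp_map_aux_data() above.
		 */
		if (__hyp_this_cpu_read(arm64_ssbd_callback_required))
			arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
	}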
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -15,6 +15,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>
@@ -267,6 +268,39 @@ static void __hyp_text __skip_instr(stru
write_sysreg_el2(*vcpu_pc(vcpu), elr);
}
+static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
+{
+ if (!cpus_have_cap(ARM64_SSBD))
+ return false;
+
+ return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
+}
+
+static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ARM64_SSBD
+ /*
+ * The host runs with the workaround always present. If the
+ * guest wants it disabled, so be it...
+ */
+ if (__needs_ssbd_off(vcpu) &&
+ __hyp_this_cpu_read(arm64_ssbd_callback_required))
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
+#endif
+}
+
+static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ARM64_SSBD
+ /*
+ * If the guest has disabled the workaround, bring it back on.
+ */
+ if (__needs_ssbd_off(vcpu) &&
+ __hyp_this_cpu_read(arm64_ssbd_callback_required))
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
+#endif
+}
+
int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *host_ctxt;
@@ -297,6 +331,8 @@ int __hyp_text __kvm_vcpu_run(struct kvm
__sysreg_restore_guest_state(guest_ctxt);
__debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
+ __set_guest_arch_workaround_state(vcpu);
+
/* Jump in the fire! */
again:
exit_code = __guest_enter(vcpu, host_ctxt);
@@ -339,6 +375,8 @@ again:
}
}
+ __set_host_arch_workaround_state(vcpu);
+
fp_enabled = __fpsimd_enabled();
__sysreg_save_guest_state(guest_ctxt);
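For completeness, the guest's side of the interface: a KVM guest
reaches the hypervisor through the HVC conduit, so once later patches
in this series wire up handling of the call, a guest can toggle its
own mitigation state. A minimal, hypothetical sketch
(example_guest_set_ssbd is not part of this patch):

	#include <linux/arm-smccc.h>

	/*
	 * Hypothetical guest-side helper: 1 enables the workaround,
	 * 0 disables it, mirroring the host-side calls above.
	 */
	static void example_guest_set_ssbd(bool enable)
	{
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2,
				  enable ? 1UL : 0UL, NULL);
	}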