Message-Id: <1500397144-16232-15-git-send-email-jintack.lim@linaro.org>
Date:   Tue, 18 Jul 2017 11:58:40 -0500
From:   Jintack Lim <jintack.lim@...aro.org>
To:     kvmarm@...ts.cs.columbia.edu, christoffer.dall@...aro.org,
        marc.zyngier@....com
Cc:     corbet@....net, pbonzini@...hat.com, rkrcmar@...hat.com,
        linux@...linux.org.uk, catalin.marinas@....com,
        will.deacon@....com, akpm@...ux-foundation.org, mchehab@...nel.org,
        cov@...eaurora.org, daniel.lezcano@...aro.org,
        david.daney@...ium.com, mark.rutland@....com,
        suzuki.poulose@....com, stefan@...lo-penguin.com,
        andy.gross@...aro.org, wcohen@...hat.com,
        ard.biesheuvel@...aro.org, shankerd@...eaurora.org,
        vladimir.murzin@....com, james.morse@....com,
        linux-doc@...r.kernel.org, linux-kernel@...r.kernel.org,
        kvm@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
        Jintack Lim <jintack.lim@...aro.org>
Subject: [RFC PATCH v2 14/38] KVM: arm64: Synchronize EL1 system registers on virtual EL2 entry and exit

When running in virtual EL2 we use the shadow EL1 system register array
for the save/restore process, so that the hardware, and especially the
memory subsystem, behaves as code written for EL2 expects while really
running in EL1.
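
As a rough illustration of that arrangement (the structure below is a
simplified stand-in, not the real struct kvm_cpu_context; in this series
the actual switch happens in kvm_arm_setup_shadow_state(), visible in the
diff below):

#define NR_SYS_REGS 32

struct cpu_ctxt {
	unsigned long sys_regs[NR_SYS_REGS];        /* the VM's EL1 state     */
	unsigned long shadow_sys_regs[NR_SYS_REGS]; /* state used in vEL2     */
	unsigned long *hw_sys_regs;                 /* what save/restore uses */
};

static void point_hw_regs(struct cpu_ctxt *ctxt, int in_virtual_el2)
{
	/*
	 * While the guest runs in virtual EL2, the hardware EL1 registers
	 * are loaded from and saved into the shadow array instead of the
	 * VM's EL1 state.
	 */
	ctxt->hw_sys_regs = in_virtual_el2 ? ctxt->shadow_sys_regs
					   : ctxt->sys_regs;
}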

This works great for EL1 system register accesses that we trap, because
those accesses are written into the virtual state for the EL1 system
registers, which is the state used when the VCPU mode eventually switches
to EL1.

However, there is a collection of EL1 system registers which we do not
trap, and as a consequence all save/restore operations of these registers
happen locally in the shadow array, with no benefit at all to software
actually running in virtual EL1.

To fix this, simply synchronize the shadow and real EL1 state for these
registers on entry to and exit from virtual EL2.
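
Conceptually, the copy looks like the stand-alone sketch below (not
kernel code; the register list and the vcpu layout are simplified
stand-ins for the copy_shadow_non_trap_el1_state() helper added by this
patch):

#include <stdio.h>

enum { TPIDR_EL0, TPIDR_EL1, TPIDRRO_EL0, PAR_EL1, NR_REGS };

struct vcpu {
	unsigned long sys_regs[NR_REGS];        /* virtual EL1 state   */
	unsigned long shadow_sys_regs[NR_REGS]; /* used in virtual EL2 */
};

static const int non_trap_regs[] = { TPIDR_EL0, TPIDR_EL1, TPIDRRO_EL0, PAR_EL1 };

static void copy_non_trap(struct vcpu *v, int entering_vel2)
{
	unsigned int i;

	for (i = 0; i < sizeof(non_trap_regs) / sizeof(non_trap_regs[0]); i++) {
		int r = non_trap_regs[i];

		if (entering_vel2)	/* flush: shadow picks up EL1 state */
			v->shadow_sys_regs[r] = v->sys_regs[r];
		else			/* sync: write back vEL2's changes  */
			v->sys_regs[r] = v->shadow_sys_regs[r];
	}
}

int main(void)
{
	struct vcpu v = { .sys_regs = { 1, 2, 3, 4 } };

	copy_non_trap(&v, 1);			/* entry to virtual EL2        */
	v.shadow_sys_regs[PAR_EL1] = 9;		/* virtual EL2 touches PAR_EL1 */
	copy_non_trap(&v, 0);			/* exit from virtual EL2       */

	printf("PAR_EL1 = %lu\n", v.sys_regs[PAR_EL1]);	/* prints 9 */
	return 0;
}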

Signed-off-by: Christoffer Dall <christoffer.dall@...aro.org>
Signed-off-by: Jintack Lim <jintack.lim@...aro.org>
---
 arch/arm64/kvm/context.c | 58 ++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 56 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/kvm/context.c b/arch/arm64/kvm/context.c
index e965049..e1bc753 100644
--- a/arch/arm64/kvm/context.c
+++ b/arch/arm64/kvm/context.c
@@ -86,6 +86,58 @@ static void flush_shadow_el1_sysregs(struct kvm_vcpu *vcpu)
 	s_sys_regs[CPACR_EL1] = cptr_to_cpacr(vcpu_sys_reg(vcpu, CPTR_EL2));
 }
 
+
+/*
+ * List of EL0 and EL1 registers which we allow the virtual EL2 mode to access
+ * directly without trapping. This is possible because the impact of
+ * accessing those registers is the same regardless of the exception
+ * levels that are allowed.
+ */
+static const int el1_non_trap_regs[] = {
+	CNTKCTL_EL1,
+	CSSELR_EL1,
+	PAR_EL1,
+	TPIDR_EL0,
+	TPIDR_EL1,
+	TPIDRRO_EL0
+};
+
+/**
+ * copy_shadow_non_trap_el1_state
+ * @vcpu:      The VCPU pointer
+ * @setup:     True, if on the way to the guest (called from setup)
+ *             False, if returning from the guest (called from restore)
+ *
+ * Some EL1 registers are accessed directly by the virtual EL2 mode because
+ * they in no way affect execution state in virtual EL2. However, we must
+ * still ensure that virtual EL2 observes the same state of the EL1 registers
+ * as the normal VM's EL1 mode, so copy this state as needed on setup/restore.
+ */
+static void copy_shadow_non_trap_el1_state(struct kvm_vcpu *vcpu, bool setup)
+{
+	u64 *s_sys_regs = vcpu->arch.ctxt.shadow_sys_regs;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(el1_non_trap_regs); i++) {
+		const int sr = el1_non_trap_regs[i];
+
+		if (setup)
+			s_sys_regs[sr] = vcpu_sys_reg(vcpu, sr);
+		else
+			vcpu_sys_reg(vcpu, sr) = s_sys_regs[sr];
+	}
+}
+
+static void sync_shadow_non_trap_el1_state(struct kvm_vcpu *vcpu)
+{
+	copy_shadow_non_trap_el1_state(vcpu, false);
+}
+
+static void flush_shadow_non_trap_el1_state(struct kvm_vcpu *vcpu)
+{
+	copy_shadow_non_trap_el1_state(vcpu, true);
+}
+
 static void flush_shadow_special_regs(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
@@ -162,6 +214,7 @@ void kvm_arm_setup_shadow_state(struct kvm_vcpu *vcpu)
 	if (unlikely(vcpu_mode_el2(vcpu))) {
 		flush_shadow_special_regs(vcpu);
 		flush_shadow_el1_sysregs(vcpu);
+		flush_shadow_non_trap_el1_state(vcpu);
 		ctxt->hw_sys_regs = ctxt->shadow_sys_regs;
 	} else {
 		flush_special_regs(vcpu);
@@ -176,9 +229,10 @@ void kvm_arm_setup_shadow_state(struct kvm_vcpu *vcpu)
  */
 void kvm_arm_restore_shadow_state(struct kvm_vcpu *vcpu)
 {
-	if (unlikely(vcpu_mode_el2(vcpu)))
+	if (unlikely(vcpu_mode_el2(vcpu))) {
 		sync_shadow_special_regs(vcpu);
-	else
+		sync_shadow_non_trap_el1_state(vcpu);
+	} else
 		sync_special_regs(vcpu);
 }
 
-- 
1.9.1
