Date:   Wed, 23 Aug 2017 22:43:57 +0200
From:   Paolo Bonzini <pbonzini@...hat.com>
To:     linux-kernel@...r.kernel.org, kvm@...r.kernel.org
Cc:     wanpeng.li@...mail.com, david@...hat.com, rkrcmar@...hat.com,
        jmattson@...gle.com
Subject: [PATCH 3/4] KVM: x86: pass struct kvm_queued_exception to kvm_multiple_exception

Avoid overwriting vcpu->arch.exception.nested_apf before
kvm_multiple_exception() has decided what to do with a pending
exception: the flag now travels inside the struct and is only committed
together with the rest of the exception state.  This also makes it
easier to add CR2 or DR6 in the future.

Signed-off-by: Paolo Bonzini <pbonzini@...hat.com>
---
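A note on the calling convention used throughout: the wrappers now build
the argument as a C99 compound literal with designated initializers, so
every kvm_queued_exception field that is not named at the call site
(pending, reinject, nested_apf, ...) is implicitly zero-initialized.
A minimal standalone sketch of the idiom, using a hypothetical stand-in
struct rather than the real kvm_queued_exception:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct kvm_queued_exception. */
struct queued_exception {
	bool pending;
	bool has_error_code;
	bool reinject;
	bool nested_apf;
	unsigned int nr;
	unsigned int error_code;
};

static void queue(struct queued_exception ex)
{
	/* Fields not named by the caller arrive zeroed. */
	printf("nr=%u has_error_code=%d error_code=%u reinject=%d\n",
	       ex.nr, ex.has_error_code, ex.error_code, ex.reinject);
}

int main(void)
{
	/* Same shape as the kvm_queue_exception_e() wrapper below. */
	queue((struct queued_exception) {
		.nr = 14,		/* PF_VECTOR */
		.has_error_code = true,
		.error_code = 2,
	});
	return 0;
}

The extra parentheses around each literal are not required for a direct
function call; they only matter if the call site is ever wrapped in a
macro, where the commas inside the braces would otherwise be taken as
macro argument separators.
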
 arch/x86/kvm/x86.c | 79 ++++++++++++++++++++++++++++++++----------------------
 1 file changed, 47 insertions(+), 32 deletions(-)
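
Also for reference while reading the second hunk: the double-fault
decision depends on exception_class(), whose body does not appear in
the context below.  A self-contained sketch of that classification and
the merge rule, reconstructed for illustration only (class values and
vector numbers mirror arch/x86/kvm/x86.c at the time of this patch):

#include <stdio.h>

enum exc_class { EXCPT_BENIGN, EXCPT_CONTRIBUTORY, EXCPT_PF };

/* Mirrors exception_class() in arch/x86/kvm/x86.c. */
static enum exc_class exception_class(int vector)
{
	switch (vector) {
	case 14:			/* #PF */
		return EXCPT_PF;
	case 0:				/* #DE */
	case 10:			/* #TS */
	case 11:			/* #NP */
	case 12:			/* #SS */
	case 13:			/* #GP */
		return EXCPT_CONTRIBUTORY;
	default:
		return EXCPT_BENIGN;
	}
}

/* The condition from kvm_multiple_exception(), per SDM Table 5-5. */
static int merges_to_double_fault(int prev_nr, int nr)
{
	enum exc_class class1 = exception_class(prev_nr);
	enum exc_class class2 = exception_class(nr);

	return (class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY) ||
	       (class1 == EXCPT_PF && class2 != EXCPT_BENIGN);
}

int main(void)
{
	printf("#GP then #GP -> #DF? %d\n", merges_to_double_fault(13, 13)); /* 1 */
	printf("#DB then #PF -> #DF? %d\n", merges_to_double_fault(1, 14));  /* 0 */
	return 0;
}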

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4e699238a113..88b91114c5a8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -381,25 +381,14 @@ static int exception_type(int vector)
 }
 
 static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
-		unsigned nr, bool has_error, u32 error_code,
-		bool reinject)
+                                   struct kvm_queued_exception ex)
 {
 	u32 prev_nr;
 	int class1, class2;
 
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
-
-	if (!vcpu->arch.exception.pending) {
-	queue:
-		if (has_error && !is_protmode(vcpu))
-			has_error = false;
-		vcpu->arch.exception.pending = true;
-		vcpu->arch.exception.has_error_code = has_error;
-		vcpu->arch.exception.nr = nr;
-		vcpu->arch.exception.error_code = error_code;
-		vcpu->arch.exception.reinject = reinject;
-		return;
-	}
+	if (!vcpu->arch.exception.pending)
+		goto queue;
 
 	/* to check exception */
 	prev_nr = vcpu->arch.exception.nr;
@@ -409,30 +398,43 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
 		return;
 	}
 	class1 = exception_class(prev_nr);
-	class2 = exception_class(nr);
+	class2 = exception_class(ex.nr);
 	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
 		|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
 		/* generate double fault per SDM Table 5-5 */
-		vcpu->arch.exception.pending = true;
-		vcpu->arch.exception.has_error_code = true;
-		vcpu->arch.exception.nr = DF_VECTOR;
-		vcpu->arch.exception.error_code = 0;
-	} else
-		/* replace previous exception with a new one in a hope
-		   that instruction re-execution will regenerate lost
-		   exception */
-		goto queue;
+		ex.pending = true;
+		ex.has_error_code = true;
+		ex.nr = DF_VECTOR;
+		ex.error_code = 0;
+	}
+
+	/*
+	 * Else, replace the previous exception with the new one, in the
+	 * hope that instruction re-execution will regenerate the lost
+	 * exception.
+	 */
+
+queue:
+	ex.pending = true;
+	ex.has_error_code = ex.has_error_code && is_protmode(vcpu);
+	vcpu->arch.exception = ex;
+	return;
 }
 
 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 {
-	kvm_multiple_exception(vcpu, nr, false, 0, false);
+	kvm_multiple_exception(vcpu, ((struct kvm_queued_exception) {
+		.nr = nr
+	}));
 }
 EXPORT_SYMBOL_GPL(kvm_queue_exception);
 
 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 {
-	kvm_multiple_exception(vcpu, nr, false, 0, true);
+	kvm_multiple_exception(vcpu, ((struct kvm_queued_exception) {
+		.nr = nr,
+		.reinject = true
+	}));
 }
 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
 
@@ -449,14 +451,18 @@ int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
 
 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
 {
+	bool nested_apf = is_guest_mode(vcpu) && fault->async_page_fault;
 	++vcpu->stat.pf_guest;
-	vcpu->arch.exception.nested_apf =
-		is_guest_mode(vcpu) && fault->async_page_fault;
-	if (vcpu->arch.exception.nested_apf)
+	if (nested_apf)
 		vcpu->arch.apf.nested_apf_token = fault->address;
 	else
 		vcpu->arch.cr2 = fault->address;
-	kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
+	kvm_multiple_exception(vcpu, ((struct kvm_queued_exception) {
+		.nr = PF_VECTOR,
+		.nested_apf = nested_apf,
+		.has_error_code = true,
+		.error_code = fault->error_code
+	}));
 }
 EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
 
@@ -479,13 +485,22 @@ void kvm_inject_nmi(struct kvm_vcpu *vcpu)
 
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
 {
-	kvm_multiple_exception(vcpu, nr, true, error_code, false);
+	kvm_multiple_exception(vcpu, ((struct kvm_queued_exception) {
+		.nr = nr,
+		.has_error_code = true,
+		.error_code = error_code
+	}));
 }
 EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
 
 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
 {
-	kvm_multiple_exception(vcpu, nr, true, error_code, true);
+	kvm_multiple_exception(vcpu, ((struct kvm_queued_exception) {
+		.nr = nr,
+		.has_error_code = true,
+		.error_code = error_code,
+		.reinject = true
+	}));
 }
 EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
 
-- 
1.8.3.1

