lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Date:   Wed, 19 May 2021 20:23:03 +0200
From:   Stefano De Venuto <stefano.devenuto99@...il.com>
To:     linux-kernel@...r.kernel.org
Cc:     pbonzini@...hat.com, seanjc@...gle.com, vkuznets@...hat.com,
        wanpengli@...cent.com, jmattson@...gle.com, x86@...nel.org,
        hpa@...or.com, kvm@...r.kernel.org, rostedt@...dmis.org,
        y.karadz@...il.com,
        Stefano De Venuto <stefano.devenuto99@...il.com>,
        Dario Faggioli <dfaggioli@...e.com>
Subject: [PATCH] Move VMEnter and VMExit tracepoints closer to the actual event

The kvm_entry and kvm_exit tracepoints are still quite far from the
actual VMEnters/VMExits. This means that in a trace we can find host
events after a kvm_entry event and before a kvm_exit one, as in this
example:

           trace.dat:  CPU 0/KVM-4594  [001]  2.167191: kvm_entry:
           trace.dat:  CPU 0/KVM-4594  [001]  2.167192: write_msr: 48, value 0
           trace.dat:  CPU 0/KVM-4594  [001]  2.167192: rcu_utilization: Start context switch
           trace.dat:  CPU 0/KVM-4594  [001]  2.167192: rcu_utilization: End context switch
trace-tumbleweed.dat:     <idle>-0     [000]  2.167196: hrtimer_cancel:
trace-tumbleweed.dat:     <idle>-0     [000]  2.167197: hrtimer_expire_entry:
trace-tumbleweed.dat:     <idle>-0     [000]  2.167201: hrtimer_expire_exit:
trace-tumbleweed.dat:     <idle>-0     [000]  2.167201: hrtimer_start:
           trace.dat:  CPU 0/KVM-4594  [001]  2.167203: read_msr: 48, value 0
           trace.dat:  CPU 0/KVM-4594  [001]  2.167203: write_msr: 48, value 4
           trace.dat:  CPU 0/KVM-4594  [001]  2.167204: kvm_exit: 

Move the tracepoints closer to the events, for both Intel and AMD, so
that a combined host-guest trace offers a more realistic representation
of what is actually happening, as shown here:

           trace.dat:  CPU 0/KVM-2553  [000]  2.190290: write_msr: 48, value 0
           trace.dat:  CPU 0/KVM-2553  [000]  2.190290: rcu_utilization: Start context switch
           trace.dat:  CPU 0/KVM-2553  [000]  2.190290: rcu_utilization: End context switch
           trace.dat:  CPU 0/KVM-2553  [000]  2.190290: kvm_entry:
trace-tumbleweed.dat:     <idle>-0     [000]  2.190290: write_msr:
trace-tumbleweed.dat:     <idle>-0     [000]  2.190290: cpu_idle:
           trace.dat:  CPU 0/KVM-2553  [000]  2.190291: kvm_exit:
           trace.dat:  CPU 0/KVM-2553  [000]  2.190291: read_msr: 48, value 0
           trace.dat:  CPU 0/KVM-2553  [000]  2.190291: write_msr: 48, value 4 

Signed-off-by: Stefano De Venuto <stefano.devenuto99@...il.com>
Signed-off-by: Dario Faggioli <dfaggioli@...e.com>
---
 arch/x86/kvm/svm/svm.c |  8 ++++----
 arch/x86/kvm/vmx/vmx.c | 10 +++++-----
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 05eca131eaf2..c77d4866e239 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3275,8 +3275,6 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 	struct kvm_run *kvm_run = vcpu->run;
 	u32 exit_code = svm->vmcb->control.exit_code;
 
-	trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
-
 	/* SEV-ES guests must use the CR write traps to track CR registers. */
 	if (!sev_es_guest(vcpu->kvm)) {
 		if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE))
@@ -3707,6 +3705,8 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 
 	kvm_guest_enter_irqoff();
 
+	trace_kvm_entry(vcpu);
+
 	if (sev_es_guest(vcpu->kvm)) {
 		__svm_sev_es_vcpu_run(vmcb_pa);
 	} else {
@@ -3725,6 +3725,8 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 		vmload(__sme_page_pa(sd->save_area));
 	}
 
+	trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM);
+
 	kvm_guest_exit_irqoff();
 }
 
@@ -3732,8 +3734,6 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	trace_kvm_entry(vcpu);
-
 	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
 	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
 	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 4bceb5ca3a89..33c732101b83 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6661,6 +6661,8 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 {
 	kvm_guest_enter_irqoff();
 
+	trace_kvm_entry(vcpu);
+
 	/* L1D Flush includes CPU buffer clear to mitigate MDS */
 	if (static_branch_unlikely(&vmx_l1d_should_flush))
 		vmx_l1d_flush(vcpu);
@@ -6675,6 +6677,9 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 
 	vcpu->arch.cr2 = native_read_cr2();
 
+	vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
+	trace_kvm_exit(vmx->exit_reason.full, vcpu, KVM_ISA_VMX);
+
 	kvm_guest_exit_irqoff();
 }
 
@@ -6693,8 +6698,6 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (vmx->emulation_required)
 		return EXIT_FASTPATH_NONE;
 
-	trace_kvm_entry(vcpu);
-
 	if (vmx->ple_window_dirty) {
 		vmx->ple_window_dirty = false;
 		vmcs_write32(PLE_WINDOW, vmx->ple_window);
@@ -6814,15 +6817,12 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		return EXIT_FASTPATH_NONE;
 	}
 
-	vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
 	if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY))
 		kvm_machine_check();
 
 	if (likely(!vmx->exit_reason.failed_vmentry))
 		vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
 
-	trace_kvm_exit(vmx->exit_reason.full, vcpu, KVM_ISA_VMX);
-
 	if (unlikely(vmx->exit_reason.failed_vmentry))
 		return EXIT_FASTPATH_NONE;
 
-- 
2.31.1

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ