Message-Id: <20221117143242.102721-3-mlevitsk@redhat.com>
Date:   Thu, 17 Nov 2022 16:32:31 +0200
From:   Maxim Levitsky <mlevitsk@...hat.com>
To:     kvm@...r.kernel.org
Cc:     Paolo Bonzini <pbonzini@...hat.com>,
        Ingo Molnar <mingo@...hat.com>,
        "H. Peter Anvin" <hpa@...or.com>,
        Dave Hansen <dave.hansen@...ux.intel.com>,
        linux-kernel@...r.kernel.org,
        Peter Zijlstra <peterz@...radead.org>,
        Thomas Gleixner <tglx@...utronix.de>,
        Sandipan Das <sandipan.das@....com>,
        Daniel Sneddon <daniel.sneddon@...ux.intel.com>,
        Jing Liu <jing2.liu@...el.com>,
        Josh Poimboeuf <jpoimboe@...nel.org>,
        Wyes Karny <wyes.karny@....com>,
        Borislav Petkov <bp@...en8.de>,
        Babu Moger <babu.moger@....com>,
        Pawan Gupta <pawan.kumar.gupta@...ux.intel.com>,
        Sean Christopherson <seanjc@...gle.com>,
        Jim Mattson <jmattson@...gle.com>, x86@...nel.org,
        Maxim Levitsky <mlevitsk@...hat.com>
Subject: [PATCH 02/13] KVM: nSVM: don't call nested_sync_control_from_vmcb02 on each VM exit

Calling nested_sync_control_from_vmcb02 on each VM exit (nested or not)
was an attempt to keep the int_ctl field in the vmcb12 cache
up to date on every VM exit.

However, none of the other fields in the vmcb12 cache are kept up to date,
so for consistency it is better to do this only on a nested VM exit.

No functional change intended.
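
For illustration only, below is a minimal standalone sketch (not kernel code)
of the mask-based int_ctl merge that, after this patch, happens only at nested
VM exit. The field and constant names mirror the diff that follows, but the bit
positions are made up for the example; the real definitions live in the kernel
headers, and the real function operates on the vmcb structures rather than
plain integers.

/*
 * sync_int_ctl_sketch.c - toy model of nested_sync_control_from_vmcb02():
 * only the int_ctl bits that the processor writes are copied from vmcb02
 * back into the vmcb12 copy; all other bits of vmcb12 are preserved.
 *
 * Bit positions below are illustrative, not the kernel's definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define V_TPR_MASK  (0xfu << 0)   /* illustrative */
#define V_IRQ_MASK  (1u << 8)     /* illustrative */
#define V_GIF_MASK  (1u << 9)     /* illustrative */

static uint32_t sync_int_ctl(uint32_t vmcb12_int_ctl, uint32_t vmcb02_int_ctl,
			     int vgif_enabled)
{
	/* Only a few fields of int_ctl are written by the processor. */
	uint32_t mask = V_IRQ_MASK | V_TPR_MASK;

	if (vgif_enabled)
		mask |= V_GIF_MASK;

	vmcb12_int_ctl &= ~mask;                 /* clear processor-written bits */
	vmcb12_int_ctl |= vmcb02_int_ctl & mask; /* take current values from vmcb02 */
	return vmcb12_int_ctl;
}

int main(void)
{
	uint32_t merged = sync_int_ctl(0x000000ffu, 0x00000103u, 1);

	printf("merged int_ctl = 0x%08x\n", merged);
	return 0;
}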

Signed-off-by: Maxim Levitsky <mlevitsk@...hat.com>
---
 arch/x86/kvm/svm/nested.c | 17 ++++++++---------
 arch/x86/kvm/svm/svm.c    |  2 --
 arch/x86/kvm/svm/svm.h    |  1 -
 3 files changed, 8 insertions(+), 12 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 43cc4a5d22e012..91a51e75717dca 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -407,11 +407,12 @@ void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
  * Synchronize fields that are written by the processor, so that
  * they can be copied back into the vmcb12.
  */
-void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
+static void nested_sync_control_from_vmcb02(struct vcpu_svm *svm,
+					    struct vmcb *vmcb12)
 {
 	u32 mask;
-	svm->nested.ctl.event_inj      = svm->vmcb->control.event_inj;
-	svm->nested.ctl.event_inj_err  = svm->vmcb->control.event_inj_err;
+	vmcb12->control.event_inj      = svm->vmcb->control.event_inj;
+	vmcb12->control.event_inj_err  = svm->vmcb->control.event_inj_err;
 
 	/* Only a few fields of int_ctl are written by the processor.  */
 	mask = V_IRQ_MASK | V_TPR_MASK;
@@ -431,8 +432,8 @@ void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
 	if (nested_vgif_enabled(svm))
 		mask |= V_GIF_MASK;
 
-	svm->nested.ctl.int_ctl        &= ~mask;
-	svm->nested.ctl.int_ctl        |= svm->vmcb->control.int_ctl & mask;
+	vmcb12->control.int_ctl        &= ~mask;
+	vmcb12->control.int_ctl        |= svm->vmcb->control.int_ctl & mask;
 }
 
 /*
@@ -985,13 +986,11 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 	if (vmcb12->control.exit_code != SVM_EXIT_ERR)
 		nested_save_pending_event_to_vmcb12(svm, vmcb12);
 
+	nested_sync_control_from_vmcb02(svm, vmcb12);
+
 	if (svm->nrips_enabled)
 		vmcb12->control.next_rip  = vmcb02->control.next_rip;
 
-	vmcb12->control.int_ctl           = svm->nested.ctl.int_ctl;
-	vmcb12->control.event_inj         = svm->nested.ctl.event_inj;
-	vmcb12->control.event_inj_err     = svm->nested.ctl.event_inj_err;
-
 	if (!kvm_pause_in_guest(vcpu->kvm)) {
 		vmcb01->control.pause_filter_count = vmcb02->control.pause_filter_count;
 		vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 527f18d8cc4489..03acbe8ff34edb 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4016,8 +4016,6 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	svm->next_rip = 0;
 	if (is_guest_mode(vcpu)) {
-		nested_sync_control_from_vmcb02(svm);
-
 		/* Track VMRUNs that have made past consistency checking */
 		if (svm->nested.nested_run_pending &&
 		    svm->vmcb->control.exit_code != SVM_EXIT_ERR)
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 199a2ecef1cec6..f5383104d00580 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -618,7 +618,6 @@ void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
 				       struct vmcb_control_area *control);
 void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
 				    struct vmcb_save_area *save);
-void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
 void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
 void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);
 
-- 
2.34.3
