Message-Id: <20221129193717.513824-7-mlevitsk@redhat.com>
Date: Tue, 29 Nov 2022 21:37:12 +0200
From: Maxim Levitsky <mlevitsk@...hat.com>
To: kvm@...r.kernel.org
Cc: Sandipan Das <sandipan.das@....com>,
Paolo Bonzini <pbonzini@...hat.com>,
Jim Mattson <jmattson@...gle.com>,
Peter Zijlstra <peterz@...radead.org>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Borislav Petkov <bp@...en8.de>,
Pawan Gupta <pawan.kumar.gupta@...ux.intel.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
Josh Poimboeuf <jpoimboe@...nel.org>,
Daniel Sneddon <daniel.sneddon@...ux.intel.com>,
Jiaxi Chen <jiaxi.chen@...ux.intel.com>,
Babu Moger <babu.moger@....com>, linux-kernel@...r.kernel.org,
Jing Liu <jing2.liu@...el.com>,
Wyes Karny <wyes.karny@....com>, x86@...nel.org,
"H. Peter Anvin" <hpa@...or.com>,
Sean Christopherson <seanjc@...gle.com>,
Maxim Levitsky <mlevitsk@...hat.com>
Subject: [PATCH v2 06/11] KVM: SVM: add wrappers to enable/disable IRET interception
SEV-ES guests don't use IRET interception to detect the end of an NMI.
Therefore it makes sense to add wrappers that contain the SEV-ES check,
instead of repeating the check at every call site.

No functional change is intended.

Suggested-by: Sean Christopherson <seanjc@...gle.com>
Signed-off-by: Maxim Levitsky <mlevitsk@...hat.com>
---
arch/x86/kvm/svm/svm.c | 28 +++++++++++++++++++---------
1 file changed, 19 insertions(+), 9 deletions(-)
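
(Note, not part of the commit message: a minimal before/after sketch of
the call-site pattern this patch factors out, using only names from the
diff below.)

	/* Before: each NMI-related path open-codes the SEV-ES check. */
	if (!sev_es_guest(vcpu->kvm))
		svm_set_intercept(svm, INTERCEPT_IRET);

	/* After: the check lives in one wrapper. */
	svm_enable_iret_interception(svm);
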
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 512b2aa21137e2..cfed6ab29c839a 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2468,16 +2468,29 @@ static int task_switch_interception(struct kvm_vcpu *vcpu)
 			       has_error_code, error_code);
 }
 
+static void svm_disable_iret_interception(struct vcpu_svm *svm)
+{
+	if (!sev_es_guest(svm->vcpu.kvm))
+		svm_clr_intercept(svm, INTERCEPT_IRET);
+}
+
+static void svm_enable_iret_interception(struct vcpu_svm *svm)
+{
+	if (!sev_es_guest(svm->vcpu.kvm))
+		svm_set_intercept(svm, INTERCEPT_IRET);
+}
+
 static int iret_interception(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	++vcpu->stat.nmi_window_exits;
 	svm->awaiting_iret_completion = true;
-	if (!sev_es_guest(vcpu->kvm)) {
-		svm_clr_intercept(svm, INTERCEPT_IRET);
+
+	svm_disable_iret_interception(svm);
+	if (!sev_es_guest(vcpu->kvm))
 		svm->nmi_iret_rip = kvm_rip_read(vcpu);
-	}
+
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	return 1;
 }
@@ -3470,8 +3483,7 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu)
 		return;
 
 	svm->nmi_masked = true;
-	if (!sev_es_guest(vcpu->kvm))
-		svm_set_intercept(svm, INTERCEPT_IRET);
+	svm_enable_iret_interception(svm);
 	++vcpu->stat.nmi_injections;
 }
 
@@ -3614,12 +3626,10 @@ static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 
 	if (masked) {
 		svm->nmi_masked = true;
-		if (!sev_es_guest(vcpu->kvm))
-			svm_set_intercept(svm, INTERCEPT_IRET);
+		svm_enable_iret_interception(svm);
 	} else {
 		svm->nmi_masked = false;
-		if (!sev_es_guest(vcpu->kvm))
-			svm_clr_intercept(svm, INTERCEPT_IRET);
+		svm_disable_iret_interception(svm);
 	}
 }
 
--
2.26.3