Message-ID: <20250211025828.3072076-16-binbin.wu@linux.intel.com>
Date: Tue, 11 Feb 2025 10:58:26 +0800
From: Binbin Wu <binbin.wu@...ux.intel.com>
To: pbonzini@...hat.com,
seanjc@...gle.com,
kvm@...r.kernel.org
Cc: rick.p.edgecombe@...el.com,
kai.huang@...el.com,
adrian.hunter@...el.com,
reinette.chatre@...el.com,
xiaoyao.li@...el.com,
tony.lindgren@...el.com,
isaku.yamahata@...el.com,
yan.y.zhao@...el.com,
chao.gao@...el.com,
linux-kernel@...r.kernel.org,
binbin.wu@...ux.intel.com
Subject: [PATCH v2 15/17] KVM: VMX: Add a helper for NMI handling
From: Sean Christopherson <sean.j.christopherson@...el.com>
Add a helper to handle the NMI exit.

TDX handles the NMI exit the same way as the VMX case. Add a helper so the
code can be shared with TDX, and expose the helper in common.h.
No functional change intended.
Signed-off-by: Sean Christopherson <sean.j.christopherson@...el.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@...el.com>
Co-developed-by: Binbin Wu <binbin.wu@...ux.intel.com>
Signed-off-by: Binbin Wu <binbin.wu@...ux.intel.com>
---
TDX interrupts v2:
- Renamed from "KVM: VMX: Move NMI/exception handler to common helper".
- Revert the unnecessary move, because a later patch will have TDX reuse
  vmx_handle_exit_irqoff() as its handle_exit_irqoff() callback.
- Add the check for NMI to __vmx_handle_nmi() and rename it to vmx_handle_nmi().
- Update change log according to the change.
TDX interrupts v1:
- Update change log with suggestions from Binbin.
- Move the NMI handling code to common header and add a helper
__vmx_handle_nmi() for it. (Binbin)
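
For reviewers, a minimal sketch (not part of this patch) of how a TDX exit
path could call the shared helper once it is exposed in common.h. The
tdx_vcpu_enter_exit() name and the SEAMCALL placeholder are assumptions for
illustration only, not the actual TDX code:

    /*
     * Hypothetical illustration: a TDX enter/exit path reusing the
     * shared NMI helper.  Only vmx_handle_nmi() comes from this patch;
     * everything else is assumed for the sketch.
     */
    static noinstr void tdx_vcpu_enter_exit(struct kvm_vcpu *vcpu)
    {
            guest_state_enter_irqoff();

            /* ... SEAMCALL to enter/exit the TD guest ... */

            /*
             * Handle an NMI that arrived while in the guest, with IRQs
             * still disabled, exactly as on the VMX exit path.  The
             * helper itself checks that the exit really was an NMI.
             */
            vmx_handle_nmi(vcpu);

            guest_state_exit_irqoff();
    }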
---
arch/x86/kvm/vmx/common.h | 2 ++
arch/x86/kvm/vmx/vmx.c | 24 +++++++++++++++---------
2 files changed, 17 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kvm/vmx/common.h b/arch/x86/kvm/vmx/common.h
index f26f7b1acbca..67b16bd8a788 100644
--- a/arch/x86/kvm/vmx/common.h
+++ b/arch/x86/kvm/vmx/common.h
@@ -180,4 +180,6 @@ static inline void __vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu,
kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_VECTOR);
}
+noinstr void vmx_handle_nmi(struct kvm_vcpu *vcpu);
+
#endif /* __KVM_X86_VMX_COMMON_H */
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 012649688e46..228a7e51b6a5 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7212,6 +7212,20 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu,
}
}
+noinstr void vmx_handle_nmi(struct kvm_vcpu *vcpu)
+{
+ if ((u16)vmx_get_exit_reason(vcpu).basic != EXIT_REASON_EXCEPTION_NMI ||
+ !is_nmi(vmx_get_intr_info(vcpu)))
+ return;
+
+ kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
+ if (cpu_feature_enabled(X86_FEATURE_FRED))
+ fred_entry_from_kvm(EVENT_TYPE_NMI, NMI_VECTOR);
+ else
+ vmx_do_nmi_irqoff();
+ kvm_after_interrupt(vcpu);
+}
+
static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
unsigned int flags)
{
@@ -7255,15 +7269,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
if (likely(!vmx_get_exit_reason(vcpu).failed_vmentry))
vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
- if ((u16)vmx_get_exit_reason(vcpu).basic == EXIT_REASON_EXCEPTION_NMI &&
- is_nmi(vmx_get_intr_info(vcpu))) {
- kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
- if (cpu_feature_enabled(X86_FEATURE_FRED))
- fred_entry_from_kvm(EVENT_TYPE_NMI, NMI_VECTOR);
- else
- vmx_do_nmi_irqoff();
- kvm_after_interrupt(vcpu);
- }
+ vmx_handle_nmi(vcpu);
out:
guest_state_exit_irqoff();
--
2.46.0