Message-ID: <20250923050317.205482-12-Neeraj.Upadhyay@amd.com>
Date: Tue, 23 Sep 2025 10:33:11 +0530
From: Neeraj Upadhyay <Neeraj.Upadhyay@....com>
To: <kvm@...r.kernel.org>, <seanjc@...gle.com>, <pbonzini@...hat.com>
CC: <linux-kernel@...r.kernel.org>, <Thomas.Lendacky@....com>,
	<nikunj@....com>, <Santosh.Shukla@....com>, <Vasant.Hegde@....com>,
	<Suravee.Suthikulpanit@....com>, <bp@...en8.de>, <David.Kaplan@....com>,
	<huibo.wang@....com>, <naveen.rao@....com>, <tiala@...rosoft.com>
Subject: [RFC PATCH v2 11/17] KVM: SVM: Enable NMI support for Secure AVIC guests

The Secure AVIC hardware introduces a new model for handling Non-Maskable
Interrupts (NMIs). This model differs significantly from standard SVM, as
guest NMI state is managed by the hardware and is not visible to KVM.

Consequently, KVM can no longer use the generic EVENT_INJ mechanism and
must not track NMI masking state in software. Instead, it must adopt the
vNMI (Virtual NMI) flow, which is the only mechanism supported by
Secure AVIC.

Enable NMI support by making three key changes (a condensed sketch of the
resulting flow follows the list):

1.  Enable NMI in VMSA: Set the V_NMI_ENABLE_MASK bit in the VMSA's
    vintr_ctrl field. This is a hardware prerequisite to enable the
    vNMI feature for the guest.

2.  Use vNMI for Injection: Modify svm_inject_nmi() to use the vNMI
    flow for Secure AVIC guests. When an NMI is requested, set the
    V_NMI_PENDING_MASK in the VMCB instead of using EVENT_INJ.

3.  Update NMI Windowing: Modify svm_nmi_allowed() to reflect that
    hardware now manages NMI blocking. KVM's only responsibility is to
    avoid queuing a new vNMI if one is already pending. The check is
    now simplified to whether V_NMI_PENDING_MASK is already set.
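
For illustration only, a condensed, self-contained model of the flow
described above. This is not kernel code: struct vmcb_model and the
savic_*() helpers are stand-ins for the real svm_inject_nmi() /
svm_nmi_allowed() paths in the diff below, and the bit positions are
placeholders for the V_NMI_* definitions in asm/svm.h.

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative bit positions; stand-ins for the real V_NMI_* masks. */
    #define V_NMI_ENABLE_MASK   (1u << 26)  /* vNMI enable bit                     */
    #define V_NMI_PENDING_MASK  (1u << 11)  /* hardware-consumed "NMI pending" bit */

    struct vmcb_model {
        unsigned int int_ctl;               /* models vmcb->control.int_ctl */
    };

    /* Change 2: request an NMI by setting V_NMI_PENDING, not EVENT_INJ. */
    static void savic_inject_nmi(struct vmcb_model *vmcb)
    {
        vmcb->int_ctl |= V_NMI_PENDING_MASK;
    }

    /*
     * Change 3: hardware tracks NMI blocking, so the only check left for
     * KVM is whether a vNMI is already queued.
     */
    static bool savic_nmi_allowed(const struct vmcb_model *vmcb)
    {
        return !(vmcb->int_ctl & V_NMI_PENDING_MASK);
    }

    int main(void)
    {
        /* Change 1: vNMI enabled up front (in real code: the VMSA's vintr_ctrl). */
        struct vmcb_model vmcb = { .int_ctl = V_NMI_ENABLE_MASK };

        printf("allowed before injection: %d\n", savic_nmi_allowed(&vmcb)); /* 1 */
        savic_inject_nmi(&vmcb);
        printf("allowed while pending:    %d\n", savic_nmi_allowed(&vmcb)); /* 0 */
        return 0;
    }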

Co-developed-by: Kishon Vijay Abraham I <kvijayab@....com>
Signed-off-by: Kishon Vijay Abraham I <kvijayab@....com>
Signed-off-by: Neeraj Upadhyay <Neeraj.Upadhyay@....com>
---
 arch/x86/kvm/svm/sev.c |  2 +-
 arch/x86/kvm/svm/svm.c | 56 ++++++++++++++++++++++++++----------------
 2 files changed, 36 insertions(+), 22 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 2dee210efb37..7c66aefe428a 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -885,7 +885,7 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
 	save->sev_features = sev->vmsa_features;
 
 	if (sev_savic_active(vcpu->kvm))
-		save->vintr_ctrl |= V_GIF_MASK;
+		save->vintr_ctrl |= V_GIF_MASK | V_NMI_ENABLE_MASK;
 
 	/*
 	 * Skip FPU and AVX setup with KVM_SEV_ES_INIT to avoid
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index fdd612c975ae..a945bc094c1a 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3635,27 +3635,6 @@ static int pre_svm_run(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static void svm_inject_nmi(struct kvm_vcpu *vcpu)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-
-	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
-
-	if (svm->nmi_l1_to_l2)
-		return;
-
-	/*
-	 * No need to manually track NMI masking when vNMI is enabled, hardware
-	 * automatically sets V_NMI_BLOCKING_MASK as appropriate, including the
-	 * case where software directly injects an NMI.
-	 */
-	if (!is_vnmi_enabled(svm)) {
-		svm->nmi_masked = true;
-		svm_set_iret_intercept(svm);
-	}
-	++vcpu->stat.nmi_injections;
-}
-
 static bool svm_is_vnmi_pending(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -3689,6 +3668,33 @@ static bool svm_set_vnmi_pending(struct kvm_vcpu *vcpu)
 	return true;
 }
 
+static void svm_inject_nmi(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (sev_savic_active(vcpu->kvm)) {
+		svm_set_vnmi_pending(vcpu);
+		++vcpu->stat.nmi_injections;
+		return;
+	}
+
+	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
+
+	if (svm->nmi_l1_to_l2)
+		return;
+
+	/*
+	 * No need to manually track NMI masking when vNMI is enabled, hardware
+	 * automatically sets V_NMI_BLOCKING_MASK as appropriate, including the
+	 * case where software directly injects an NMI.
+	 */
+	if (!is_vnmi_enabled(svm)) {
+		svm->nmi_masked = true;
+		svm_set_iret_intercept(svm);
+	}
+	++vcpu->stat.nmi_injections;
+}
+
 static void svm_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -3836,6 +3842,14 @@ bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
 static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
+
+	/* Secure AVIC only supports V_NMI-based NMI injection. */
+	if (sev_savic_active(vcpu->kvm)) {
+		if (svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK)
+			return 0;
+		return 1;
+	}
+
 	if (svm->nested.nested_run_pending)
 		return -EBUSY;
 
-- 
2.34.1
