Message-ID: <20220709134230.2397-7-santosh.shukla@amd.com>
Date: Sat, 9 Jul 2022 19:12:29 +0530
From: Santosh Shukla <santosh.shukla@....com>
To: Paolo Bonzini <pbonzini@...hat.com>
CC: Sean Christopherson <seanjc@...gle.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>,
Maxim Levitsky <mlevitsk@...hat.com>,
Tom Lendacky <thomas.lendacky@....com>, <kvm@...r.kernel.org>,
<linux-kernel@...r.kernel.org>, <Santosh.Shukla@....com>
Subject: [PATCHv2 6/7] KVM: nSVM: implement nested VNMI

Currently, nested_vmcb02_prepare_control() checks and programs the
virtual interrupt bits (V_TPR, V_INTR, V_IRQ) in nested mode. To
support nested VNMI, extend that check to also copy the VNMI bits
from vmcb12 when VNMI is enabled, and on vmexit save the
V_NMI_MASK/V_NMI_PENDING state back to vmcb12.

Tested with KVM-unit-tests and in a nested guest scenario.
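
For reference, the vmcb02 int_ctl merge that the new bits feed into
(surrounding code in nested_vmcb02_prepare_control(), not modified by
this patch; quoted here as a rough sketch for context) is:

        /*
         * Selected int_ctl bits are taken from vmcb12, the rest are
         * inherited from vmcb01.
         */
        vmcb02->control.int_ctl =
                (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
                (vmcb01->control.int_ctl & int_ctl_vmcb01_bits);

Adding V_NMI_PENDING and V_NMI_ENABLE to int_ctl_vmcb12_bits therefore
propagates the L1-provided VNMI state into vmcb02 on nested VMRUN.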
Signed-off-by: Santosh Shukla <santosh.shukla@....com>
---
v2:
- Save the V_NMI_PENDING/MASK state in vmcb12 on vmexit.
- Tested in a nested environment - L1 injecting a VNMI into L2 (see
  the sketch after this list).
- Tested VNMI in KVM-unit-tests.
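
Illustrative snippet for the L1-injects-VNMI-into-L2 scenario (a
minimal sketch, not part of this patch; assumes an L1 hypervisor that
builds its own vmcb12 for L2):

        /* Expose VNMI to L2 and request a virtual NMI before VMRUN. */
        vmcb12->control.int_ctl |= V_NMI_ENABLE | V_NMI_PENDING;

On the next nested VMRUN, nested_vmcb02_prepare_control() copies these
bits into vmcb02, and hardware then injects the NMI into L2.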
arch/x86/kvm/svm/nested.c | 16 +++++++++++++++-
arch/x86/kvm/svm/svm.c | 5 +++++
arch/x86/kvm/svm/svm.h | 6 ++++++
3 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index ba7cd26f438f..f97651f317a0 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -629,6 +629,9 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
         else
                 int_ctl_vmcb01_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
 
+        if (nested_vnmi_enabled(svm))
+                int_ctl_vmcb12_bits |= (V_NMI_PENDING | V_NMI_ENABLE);
+
         /* Copied from vmcb01. msrpm_base can be overwritten later. */
         vmcb02->control.nested_ctl = vmcb01->control.nested_ctl;
         vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa;
@@ -951,10 +954,21 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
         vmcb12->control.event_inj = svm->nested.ctl.event_inj;
         vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;
 
+        if (nested_vnmi_enabled(svm)) {
+                if (vmcb02->control.int_ctl & V_NMI_MASK)
+                        vmcb12->control.int_ctl |= V_NMI_MASK;
+                else
+                        vmcb12->control.int_ctl &= ~V_NMI_MASK;
+
+                if (vmcb02->control.int_ctl & V_NMI_PENDING)
+                        vmcb12->control.int_ctl |= V_NMI_PENDING;
+                else
+                        vmcb12->control.int_ctl &= ~V_NMI_PENDING;
+        }
+
         if (!kvm_pause_in_guest(vcpu->kvm)) {
                 vmcb01->control.pause_filter_count = vmcb02->control.pause_filter_count;
                 vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
-
         }
 
         nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index c73a1809a7c7..d81ad913542d 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4069,6 +4069,8 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 
         svm->vgif_enabled = vgif && guest_cpuid_has(vcpu, X86_FEATURE_VGIF);
 
+        svm->vnmi_enabled = vnmi && guest_cpuid_has(vcpu, X86_FEATURE_V_NMI);
+
         svm_recalc_instruction_intercepts(vcpu, svm);
 
         /* For sev guests, the memory encryption bit is not reserved in CR3. */
@@ -4827,6 +4829,9 @@ static __init void svm_set_cpu_caps(void)
         if (vgif)
                 kvm_cpu_cap_set(X86_FEATURE_VGIF);
 
+        if (vnmi)
+                kvm_cpu_cap_set(X86_FEATURE_V_NMI);
+
         /* Nested VM can receive #VMEXIT instead of triggering #GP */
         kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK);
 }
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index f36e30df6202..0094ca2698fa 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -241,6 +241,7 @@ struct vcpu_svm {
         bool pause_filter_enabled : 1;
         bool pause_threshold_enabled : 1;
         bool vgif_enabled : 1;
+        bool vnmi_enabled : 1;
 
         u32 ldr_reg;
         u32 dfr_reg;
@@ -510,6 +511,11 @@ static inline bool nested_npt_enabled(struct vcpu_svm *svm)
         return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
 }
 
+static inline bool nested_vnmi_enabled(struct vcpu_svm *svm)
+{
+        return svm->vnmi_enabled && (svm->nested.ctl.int_ctl & V_NMI_ENABLE);
+}
+
 static inline struct vmcb *get_vnmi_vmcb(struct vcpu_svm *svm)
 {
         if (!vnmi)
--
2.25.1