Message-Id: <20221117143242.102721-12-mlevitsk@redhat.com>
Date: Thu, 17 Nov 2022 16:32:40 +0200
From: Maxim Levitsky <mlevitsk@...hat.com>
To: kvm@...r.kernel.org
Cc: Paolo Bonzini <pbonzini@...hat.com>,
Ingo Molnar <mingo@...hat.com>,
"H. Peter Anvin" <hpa@...or.com>,
Dave Hansen <dave.hansen@...ux.intel.com>,
linux-kernel@...r.kernel.org,
Peter Zijlstra <peterz@...radead.org>,
Thomas Gleixner <tglx@...utronix.de>,
Sandipan Das <sandipan.das@....com>,
Daniel Sneddon <daniel.sneddon@...ux.intel.com>,
Jing Liu <jing2.liu@...el.com>,
Josh Poimboeuf <jpoimboe@...nel.org>,
Wyes Karny <wyes.karny@....com>,
Borislav Petkov <bp@...en8.de>,
Babu Moger <babu.moger@....com>,
Pawan Gupta <pawan.kumar.gupta@...ux.intel.com>,
Sean Christopherson <seanjc@...gle.com>,
Jim Mattson <jmattson@...gle.com>, x86@...nel.org,
Maxim Levitsky <mlevitsk@...hat.com>,
Santosh Shukla <santosh.shukla@....com>
Subject: [PATCH 11/13] KVM: nSVM: implement nested VNMI

From: Santosh Shukla <santosh.shukla@....com>

Supporting nested VNMI requires saving and restoring the VNMI bits
during nested entry and exit.

If both L1 and L2 use VNMI, copy the VNMI bits from vmcb12 to vmcb02
on nested entry and back on nested exit (see the sketch below). If L1
uses VNMI but L2 does not, copy the VNMI bits from vmcb01 to vmcb02 on
nested entry and back on nested exit.

Tested with KVM-unit-tests and a nested guest scenario.
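
A minimal sketch of the entry-direction copy described above
(illustrative only, not code from this patch; vmcb01/vmcb02/vmcb12 and
the V_NMI_* bits are the ones used in the diff below):

	/* Sketch: pick where vmcb02's vNMI bits come from on nested entry. */
	u32 vnmi_bits = V_NMI_PENDING | V_NMI_ENABLE | V_NMI_MASK;
	struct vmcb *src = nested_vnmi_enabled(svm) ? vmcb12 : vmcb01;

	vmcb02->control.int_ctl &= ~vnmi_bits;
	vmcb02->control.int_ctl |= src->control.int_ctl & vnmi_bits;

The exit direction reverses the copy, so whichever VMCB owns the bits
sees what the CPU left in vmcb02.
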
Maxim:
  - moved copying of the vNMI bits into nested_sync_int_ctl_from_vmcb02

Signed-off-by: Santosh Shukla <santosh.shukla@....com>
Signed-off-by: Maxim Levitsky <mlevitsk@...hat.com>
---
 arch/x86/kvm/svm/nested.c | 13 +++++++++++++
 arch/x86/kvm/svm/svm.c    |  5 +++++
 arch/x86/kvm/svm/svm.h    |  6 ++++++
 3 files changed, 24 insertions(+)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 1f2b8492c8782f..c9fcdd691bb5a1 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -442,6 +442,14 @@ static void nested_sync_int_ctl_from_vmcb02(struct vcpu_svm *svm,
 	 */
 	;
 
+	if (vnmi) {
+		/* copy back the vNMI fields which can be modified by the CPU */
+		if (nested_vnmi_enabled(svm))
+			l2_to_l1_mask |= V_NMI_MASK | V_NMI_PENDING;
+		else
+			l2_to_l0_mask |= V_NMI_MASK | V_NMI_PENDING;
+	}
+
 	vmcb12->control.int_ctl =
 		(svm->nested.ctl.int_ctl & ~l2_to_l1_mask) |
 		(vmcb02->control.int_ctl & l2_to_l1_mask);
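
A hedged sketch of the complementary l2_to_l0 path, which falls outside
this hunk (assuming it mirrors the l2_to_l1 masking above, so that the
vNMI bits KVM itself owns flow back into vmcb01 instead of vmcb12):

	/* Assumed counterpart, not quoted in this hunk. */
	vmcb01->control.int_ctl =
		(vmcb01->control.int_ctl & ~l2_to_l0_mask) |
		(vmcb02->control.int_ctl & l2_to_l0_mask);
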
@@ -657,6 +665,11 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
 	else
 		int_ctl_vmcb01_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
 
+	if (nested_vnmi_enabled(svm))
+		int_ctl_vmcb12_bits |= (V_NMI_PENDING | V_NMI_ENABLE | V_NMI_MASK);
+	else
+		int_ctl_vmcb01_bits |= (V_NMI_PENDING | V_NMI_ENABLE | V_NMI_MASK);
+
 	/* Copied from vmcb01. msrpm_base can be overwritten later. */
 	vmcb02->control.nested_ctl = vmcb01->control.nested_ctl;
 	vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa;
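
A hedged sketch of how these two bit sets are presumably consumed (the
combining statement lies outside this hunk): bits named in
int_ctl_vmcb12_bits are taken from the L1-provided controls, bits named
in int_ctl_vmcb01_bits from vmcb01:

	/* Assumed combining step, not quoted in this hunk. */
	vmcb02->control.int_ctl =
		(svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
		(vmcb01->control.int_ctl & int_ctl_vmcb01_bits);
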
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 9ebfbd0d4b467e..c9190a8ee03273 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4188,6 +4188,8 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 
 	svm->vgif_enabled = vgif && guest_cpuid_has(vcpu, X86_FEATURE_VGIF);
 
+	svm->vnmi_enabled = vnmi && guest_cpuid_has(vcpu, X86_FEATURE_AMD_VNMI);
+
 	svm_recalc_instruction_intercepts(vcpu, svm);
 
 	/* For sev guests, the memory encryption bit is not reserved in CR3. */
@@ -4939,6 +4941,9 @@ static __init void svm_set_cpu_caps(void)
 		if (vgif)
 			kvm_cpu_cap_set(X86_FEATURE_VGIF);
 
+		if (vnmi)
+			kvm_cpu_cap_set(X86_FEATURE_AMD_VNMI);
+
 		/* Nested VM can receive #VMEXIT instead of triggering #GP */
 		kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK);
 	}
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 5f2ee72c6e3125..d39e937a2c8391 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -252,6 +252,7 @@ struct vcpu_svm {
 	bool pause_filter_enabled : 1;
 	bool pause_threshold_enabled : 1;
 	bool vgif_enabled : 1;
+	bool vnmi_enabled : 1;
 
 	u32 ldr_reg;
 	u32 dfr_reg;
@@ -532,6 +533,11 @@ static inline bool is_x2apic_msrpm_offset(u32 offset)
 	       (msr < (APIC_BASE_MSR + 0x100));
 }
 
+static inline bool nested_vnmi_enabled(struct vcpu_svm *svm)
+{
+	return svm->vnmi_enabled && (svm->nested.ctl.int_ctl & V_NMI_ENABLE);
+}
+
 static inline struct vmcb *get_vnmi_vmcb(struct vcpu_svm *svm)
 {
 	if (!vnmi)
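
For context, a hedged guess at how get_vnmi_vmcb continues past the hunk
boundary (an assumption based on the helpers this series introduces, not
on code quoted here): return NULL when vNMI is unavailable, otherwise
hand back whichever VMCB currently owns the vNMI state:

	/* Assumed continuation, not part of this patch's quoted context. */
		return NULL;

	if (is_guest_mode(&svm->vcpu))
		return svm->nested.vmcb02.ptr;
	else
		return svm->vmcb01.ptr;
}
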
--
2.34.3