[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <7efc45d1-4799-972b-cad7-482bb1662fa5@amd.com>
Date: Mon, 5 Dec 2022 22:44:33 +0530
From: Santosh Shukla <santosh.shukla@....com>
To: Maxim Levitsky <mlevitsk@...hat.com>, kvm@...r.kernel.org
Cc: Sandipan Das <sandipan.das@....com>,
Paolo Bonzini <pbonzini@...hat.com>,
Jim Mattson <jmattson@...gle.com>,
Peter Zijlstra <peterz@...radead.org>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Borislav Petkov <bp@...en8.de>,
Pawan Gupta <pawan.kumar.gupta@...ux.intel.com>,
Thomas Gleixner <tglx@...utronix.de>,
Ingo Molnar <mingo@...hat.com>,
Josh Poimboeuf <jpoimboe@...nel.org>,
Daniel Sneddon <daniel.sneddon@...ux.intel.com>,
Jiaxi Chen <jiaxi.chen@...ux.intel.com>,
Babu Moger <babu.moger@....com>, linux-kernel@...r.kernel.org,
Jing Liu <jing2.liu@...el.com>,
Wyes Karny <wyes.karny@....com>, x86@...nel.org,
"H. Peter Anvin" <hpa@...or.com>,
Sean Christopherson <seanjc@...gle.com>,
Santosh Shukla <santosh.shukla@....com>
Subject: Re: [PATCH v2 11/11] KVM: nSVM: implement support for nested VNMI
On 11/30/2022 1:07 AM, Maxim Levitsky wrote:
> This patch allows L1 to use vNMI to accelerate its injection
> of NMIs to L2 by passing through vNMI int_ctl bits from vmcb12
> to/from vmcb02.
>
> While L2 runs, L1's vNMI is inhibited, and L1's NMIs are injected
> normally.
>
> Supporting nested VNMI requires saving and restoring the VNMI
> bits during nested entry and exit.
> In case both L1 and L2 use VNMI, copy the VNMI bits from vmcb12 to
> vmcb02 during entry and vice-versa during exit.
> In case L1 uses VNMI but L2 doesn't, copy the VNMI bits from vmcb01 to
> vmcb02 during entry and vice-versa during exit.
>
> Tested with the KVM-unit-test and Nested Guest scenario.
>
>
> Signed-off-by: Santosh Shukla <santosh.shukla@....com>
> Signed-off-by: Maxim Levitsky <mlevitsk@...hat.com>
> ---
> arch/x86/kvm/svm/nested.c | 13 ++++++++++++-
> arch/x86/kvm/svm/svm.c | 5 +++++
> arch/x86/kvm/svm/svm.h | 6 ++++++
> 3 files changed, 23 insertions(+), 1 deletion(-)
>
> diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> index 5bea672bf8b12d..81346665058e26 100644
> --- a/arch/x86/kvm/svm/nested.c
> +++ b/arch/x86/kvm/svm/nested.c
> @@ -278,6 +278,11 @@ static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
> if (CC(!nested_svm_check_tlb_ctl(vcpu, control->tlb_ctl)))
> return false;
>
> + if (CC((control->int_ctl & V_NMI_ENABLE) &&
> + !vmcb12_is_intercept(control, INTERCEPT_NMI))) {
> + return false;
> + }
> +
> return true;
> }
>
> @@ -427,6 +432,9 @@ void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
> if (nested_vgif_enabled(svm))
> mask |= V_GIF_MASK;
>
> + if (nested_vnmi_enabled(svm))
> + mask |= V_NMI_MASK | V_NMI_PENDING;
> +
> svm->nested.ctl.int_ctl &= ~mask;
> svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
> }
> @@ -682,8 +690,11 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
> else
> int_ctl_vmcb01_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
>
> - if (vnmi)
> + if (vnmi) {
To avoid the above change, I think we should move the nested bits from patch 10/11 to this one,
i.e. move the functions (nested_svm_save_vnmi and nested_svm_restore_vnmi) into this patch.
Does that make sense?
Thanks,
Santosh
> nested_svm_save_vnmi(svm);
> + if (nested_vnmi_enabled(svm))
> + int_ctl_vmcb12_bits |= (V_NMI_PENDING | V_NMI_ENABLE | V_NMI_MASK);
> + }
>
> /* Copied from vmcb01. msrpm_base can be overwritten later. */
> vmcb02->control.nested_ctl = vmcb01->control.nested_ctl;
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index bf10adcf3170a8..fb203f536d2f9b 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -4214,6 +4214,8 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
>
> svm->vgif_enabled = vgif && guest_cpuid_has(vcpu, X86_FEATURE_VGIF);
>
> + svm->vnmi_enabled = vnmi && guest_cpuid_has(vcpu, X86_FEATURE_AMD_VNMI);
> +
> svm_recalc_instruction_intercepts(vcpu, svm);
>
> /* For sev guests, the memory encryption bit is not reserved in CR3. */
> @@ -4967,6 +4969,9 @@ static __init void svm_set_cpu_caps(void)
> if (vgif)
> kvm_cpu_cap_set(X86_FEATURE_VGIF);
>
> + if (vnmi)
> + kvm_cpu_cap_set(X86_FEATURE_AMD_VNMI);
> +
> /* Nested VM can receive #VMEXIT instead of triggering #GP */
> kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK);
> }
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index 0b7e1790fadde1..8fb2085188c5ac 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -271,6 +271,7 @@ struct vcpu_svm {
> bool pause_filter_enabled : 1;
> bool pause_threshold_enabled : 1;
> bool vgif_enabled : 1;
> + bool vnmi_enabled : 1;
>
> u32 ldr_reg;
> u32 dfr_reg;
> @@ -545,6 +546,11 @@ static inline bool nested_npt_enabled(struct vcpu_svm *svm)
> return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
> }
>
> +static inline bool nested_vnmi_enabled(struct vcpu_svm *svm)
> +{
> + return svm->vnmi_enabled && (svm->nested.ctl.int_ctl & V_NMI_ENABLE);
> +}
> +
> static inline bool is_x2apic_msrpm_offset(u32 offset)
> {
> /* 4 msrs per u8, and 4 u8 in u32 */
Powered by blists - more mailing lists