Message-ID: <22c4fa84b033ecd7a984067a3cb53a20b6c0b269.camel@redhat.com>
Date: Wed, 11 May 2022 14:25:17 +0300
From: Maxim Levitsky <mlevitsk@...hat.com>
To: Vitaly Kuznetsov <vkuznets@...hat.com>, kvm@...r.kernel.org,
Paolo Bonzini <pbonzini@...hat.com>
Cc: Sean Christopherson <seanjc@...gle.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Michael Kelley <mikelley@...rosoft.com>,
Siddharth Chandrasekaran <sidcha@...zon.de>,
linux-hyperv@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v3 12/34] KVM: nVMX: Keep track of hv_vm_id/hv_vp_id
when eVMCS is in use
On Thu, 2022-04-14 at 15:19 +0200, Vitaly Kuznetsov wrote:
> To handle L2 TLB flush requests, KVM needs to keep track of the L2 VM_ID and
> VP_ID, which are set by the L1 hypervisor. The 'partition assist page' address
> is also needed to handle the post-flush exit to L1 upon request.
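(A side note for readers of the series: below is a rough, purely hypothetical
sketch of how the cached IDs could be consumed when deciding whether an
L1-issued flush request targets the currently loaded L2 vCPU. The helper name
hv_nested_flush_matches is made up for illustration and is not part of this
patch; the real consumers come later in the series.)

/*
 * Hypothetical illustration only: does a flush request addressed to
 * (vm_id, vp_id) refer to the L2 vCPU whose eVMCS identifiers were
 * cached in kvm_vcpu_hv below?
 */
static bool hv_nested_flush_matches(struct kvm_vcpu *vcpu,
				    u64 vm_id, u32 vp_id)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return hv_vcpu && hv_vcpu->nested.vm_id == vm_id &&
	       hv_vcpu->nested.vp_id == vp_id;
}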
>
> Signed-off-by: Vitaly Kuznetsov <vkuznets@...hat.com>
> ---
> arch/x86/include/asm/kvm_host.h | 6 ++++++
> arch/x86/kvm/vmx/nested.c | 15 +++++++++++++++
> 2 files changed, 21 insertions(+)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 837c07e213de..8b2a52bf26c0 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -622,6 +622,12 @@ struct kvm_vcpu_hv {
>
> /* Preallocated buffer for handling hypercalls passing sparse vCPU set */
> u64 sparse_banks[64];
> +
> + struct {
> + u64 pa_page_gpa;
> + u64 vm_id;
> + u32 vp_id;
> + } nested;
> };
>
> /* Xen HVM per vcpu emulation context */
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index a6688663da4d..ee88921c6156 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -225,6 +225,7 @@ static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
>
> static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
> {
> + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
> struct vcpu_vmx *vmx = to_vmx(vcpu);
>
> if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
> @@ -233,6 +234,12 @@ static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
> }
>
> vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
> +
> + if (hv_vcpu) {
> + hv_vcpu->nested.pa_page_gpa = INVALID_GPA;
> + hv_vcpu->nested.vm_id = 0;
> + hv_vcpu->nested.vp_id = 0;
> + }
> }
>
> static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
> @@ -1591,11 +1598,19 @@ static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields
> {
> struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
> struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
> + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(&vmx->vcpu);
>
> /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
> vmcs12->tpr_threshold = evmcs->tpr_threshold;
> vmcs12->guest_rip = evmcs->guest_rip;
>
> + if (unlikely(!(hv_clean_fields &
> + HV_VMX_ENLIGHTENED_CLEAN_FIELD_ENLIGHTENMENTSCONTROL))) {
> + hv_vcpu->nested.pa_page_gpa = evmcs->partition_assist_page;
> + hv_vcpu->nested.vm_id = evmcs->hv_vm_id;
> + hv_vcpu->nested.vp_id = evmcs->hv_vp_id;
> + }
> +
> if (unlikely(!(hv_clean_fields &
> HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
> vmcs12->guest_rsp = evmcs->guest_rsp;
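One more illustrative note: a rough sketch of how the partition assist page
GPA cached above could later be consulted to decide whether the post-flush
exit to L1 should be synthesized. This is illustrative only and not what this
patch adds; struct hv_partition_assist_pg comes from the TLFS definitions, the
helper name hv_nested_wants_post_flush_exit is made up, and the actual
consumer appears later in the series.

/*
 * Illustrative only: read the TLB lock count from the partition assist
 * page cached above; a non-zero count means L1 asked for a synthetic
 * exit once the flush has been performed.
 */
static bool hv_nested_wants_post_flush_exit(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct hv_partition_assist_pg assist_page;

	if (!hv_vcpu || hv_vcpu->nested.pa_page_gpa == INVALID_GPA)
		return false;

	if (kvm_read_guest(vcpu->kvm, hv_vcpu->nested.pa_page_gpa,
			   &assist_page, sizeof(assist_page)))
		return false;

	return assist_page.tlb_lock_count != 0;
}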

Reviewed-by: Maxim Levitsky <mlevitsk@...hat.com>

Best regards,
	Maxim Levitsky