Message-ID: <e249bd60cef49706e39cc619876e26c90d88ecb0.camel@redhat.com>
Date: Tue, 07 Jun 2022 12:56:02 +0300
From: Maxim Levitsky <mlevitsk@...hat.com>
To: Vitaly Kuznetsov <vkuznets@...hat.com>, kvm@...r.kernel.org,
Paolo Bonzini <pbonzini@...hat.com>
Cc: Sean Christopherson <seanjc@...gle.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Michael Kelley <mikelley@...rosoft.com>,
Siddharth Chandrasekaran <sidcha@...zon.de>,
Yuan Yao <yuan.yao@...ux.intel.com>,
linux-hyperv@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v6 20/38] KVM: nVMX: hyper-v: Cache VP assist page in
'struct kvm_vcpu_hv'
On Mon, 2022-06-06 at 10:36 +0200, Vitaly Kuznetsov wrote:
> In preparation for enabling L2 TLB flush, cache the VP assist page in
> 'struct kvm_vcpu_hv'. While at it, rename nested_enlightened_vmentry()
> to nested_get_evmptr() and make it return eVMCS GPA directly.
>
> No functional change intended.
>
> Signed-off-by: Vitaly Kuznetsov <vkuznets@...hat.com>
> ---
> arch/x86/include/asm/kvm_host.h | 2 ++
> arch/x86/kvm/hyperv.c | 10 ++++++----
> arch/x86/kvm/hyperv.h | 3 +--
> arch/x86/kvm/vmx/evmcs.c | 21 +++++++--------------
> arch/x86/kvm/vmx/evmcs.h | 2 +-
> arch/x86/kvm/vmx/nested.c | 6 +++---
> 6 files changed, 20 insertions(+), 24 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index f9a34af0a5cc..e62db76c8d37 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -650,6 +650,8 @@ struct kvm_vcpu_hv {
> /* Preallocated buffer for handling hypercalls passing sparse vCPU set */
> u64 sparse_banks[HV_MAX_SPARSE_VCPU_BANKS];
>
> + struct hv_vp_assist_page vp_assist_page;
> +
> struct {
> u64 pa_page_gpa;
> u64 vm_id;
> diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
> index 4396d75588d8..91310774c0b9 100644
> --- a/arch/x86/kvm/hyperv.c
> +++ b/arch/x86/kvm/hyperv.c
> @@ -903,13 +903,15 @@ bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
> }
> EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);
>
> -bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
> - struct hv_vp_assist_page *assist_page)
> +bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu)
> {
> - if (!kvm_hv_assist_page_enabled(vcpu))
> + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
> +
> + if (!hv_vcpu || !kvm_hv_assist_page_enabled(vcpu))
> return false;
> +
> return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
> - assist_page, sizeof(*assist_page));
> + &hv_vcpu->vp_assist_page, sizeof(struct hv_vp_assist_page));
> }
> EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);
>
> diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
> index 2aa6fb7fc599..139beb55b781 100644
> --- a/arch/x86/kvm/hyperv.h
> +++ b/arch/x86/kvm/hyperv.h
> @@ -105,8 +105,7 @@ int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages);
> void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);
>
> bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu);
> -bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
> - struct hv_vp_assist_page *assist_page);
> +bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu);
>
> static inline struct kvm_vcpu_hv_stimer *to_hv_stimer(struct kvm_vcpu *vcpu,
> int timer_index)
> diff --git a/arch/x86/kvm/vmx/evmcs.c b/arch/x86/kvm/vmx/evmcs.c
> index 805afc170b5b..7cd7b16942c6 100644
> --- a/arch/x86/kvm/vmx/evmcs.c
> +++ b/arch/x86/kvm/vmx/evmcs.c
> @@ -307,24 +307,17 @@ __init void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
> }
> #endif
>
> -bool nested_enlightened_vmentry(struct kvm_vcpu *vcpu, u64 *evmcs_gpa)
> +u64 nested_get_evmptr(struct kvm_vcpu *vcpu)
> {
> - struct hv_vp_assist_page assist_page;
> + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
>
> - *evmcs_gpa = -1ull;
> + if (unlikely(!kvm_hv_get_assist_page(vcpu)))
> + return EVMPTR_INVALID;
>
> - if (unlikely(!kvm_hv_get_assist_page(vcpu, &assist_page)))
> - return false;
> + if (unlikely(!hv_vcpu->vp_assist_page.enlighten_vmentry))
> + return EVMPTR_INVALID;
>
> - if (unlikely(!assist_page.enlighten_vmentry))
> - return false;
> -
> - if (unlikely(!evmptr_is_valid(assist_page.current_nested_vmcs)))
> - return false;
> -
> - *evmcs_gpa = assist_page.current_nested_vmcs;
> -
> - return true;
> + return hv_vcpu->vp_assist_page.current_nested_vmcs;
> }
>
> uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu)
> diff --git a/arch/x86/kvm/vmx/evmcs.h b/arch/x86/kvm/vmx/evmcs.h
> index 584741b85eb6..22d238b36238 100644
> --- a/arch/x86/kvm/vmx/evmcs.h
> +++ b/arch/x86/kvm/vmx/evmcs.h
> @@ -239,7 +239,7 @@ enum nested_evmptrld_status {
> EVMPTRLD_ERROR,
> };
>
> -bool nested_enlightened_vmentry(struct kvm_vcpu *vcpu, u64 *evmcs_gpa);
> +u64 nested_get_evmptr(struct kvm_vcpu *vcpu);
> uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu);
> int nested_enable_evmcs(struct kvm_vcpu *vcpu,
> uint16_t *vmcs_version);
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index 4a827b3d929a..87bff81f7f3e 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -1995,7 +1995,8 @@ static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
> if (likely(!vmx->nested.enlightened_vmcs_enabled))
> return EVMPTRLD_DISABLED;
>
> - if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa)) {
> + evmcs_gpa = nested_get_evmptr(vcpu);
> + if (!evmptr_is_valid(evmcs_gpa)) {
> nested_release_evmcs(vcpu);
> return EVMPTRLD_DISABLED;
> }
> @@ -5084,7 +5085,6 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
> struct vcpu_vmx *vmx = to_vmx(vcpu);
> u32 zero = 0;
> gpa_t vmptr;
> - u64 evmcs_gpa;
> int r;
>
> if (!nested_vmx_check_permission(vcpu))
> @@ -5110,7 +5110,7 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
> * vmx->nested.hv_evmcs but this shouldn't be a problem.
> */
> if (likely(!vmx->nested.enlightened_vmcs_enabled ||
> - !nested_enlightened_vmentry(vcpu, &evmcs_gpa))) {
> + !evmptr_is_valid(nested_get_evmptr(vcpu)))) {
> if (vmptr == vmx->nested.current_vmptr)
> nested_release_vmcs12(vcpu);
>
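The new calling convention is a nice cleanup. Just to spell it out
(illustration only, using the names from this patch), callers go from
a bool plus an out-parameter to a direct return value checked with
evmptr_is_valid():

	/* Before: bool result, GPA via out-parameter */
	u64 evmcs_gpa;

	if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa))
		return EVMPTRLD_DISABLED;

	/* After: GPA (or EVMPTR_INVALID) returned directly */
	u64 evmcs_gpa = nested_get_evmptr(vcpu);

	if (!evmptr_is_valid(evmcs_gpa))
		return EVMPTRLD_DISABLED;

The cached copy in hv_vcpu->vp_assist_page also means callers of
kvm_hv_get_assist_page() no longer need to carry a
struct hv_vp_assist_page on the stack.
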
Reviewed-by: Maxim Levitsky <mlevitsk@...hat.com>

Best regards,
	Maxim Levitsky