Message-ID: <c78f7ffd449dcf4151bfe438e37cd033e3f0e062.camel@redhat.com>
Date: Wed, 11 May 2022 14:24:53 +0300
From: Maxim Levitsky <mlevitsk@...hat.com>
To: Vitaly Kuznetsov <vkuznets@...hat.com>, kvm@...r.kernel.org,
Paolo Bonzini <pbonzini@...hat.com>
Cc: Sean Christopherson <seanjc@...gle.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Michael Kelley <mikelley@...rosoft.com>,
Siddharth Chandrasekaran <sidcha@...zon.de>,
linux-hyperv@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v3 10/34] KVM: x86: hyper-v: Create a separate ring for
L2 TLB flush
On Thu, 2022-04-14 at 15:19 +0200, Vitaly Kuznetsov wrote:
> To handle L2 TLB flush requests, KVM needs to use a separate ring from
> regular (L1) Hyper-V TLB flush requests: e.g. when a request to flush
> something in L2 is made, the target vCPU can transition from L2 to L1,
> receive a request to flush a GVA for L1 and then attempt to re-enter L2.
> The L2 flush request must be processed before that re-entry. Similarly,
> requests to flush GVAs in L1 must wait until the vCPU exits from L2 to L1.
>
> No functional change as KVM doesn't handle L2 TLB flush requests from
> L2 yet.
>
> Signed-off-by: Vitaly Kuznetsov <vkuznets@...hat.com>
> ---
> arch/x86/include/asm/kvm_host.h | 8 +++++++-
> arch/x86/kvm/hyperv.c | 8 +++++---
> arch/x86/kvm/hyperv.h | 19 ++++++++++++++++---
> 3 files changed, 28 insertions(+), 7 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index b4dd2ff61658..058061621872 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -580,6 +580,12 @@ struct kvm_vcpu_hv_synic {
>
> #define KVM_HV_TLB_FLUSH_RING_SIZE (16)
>
> +enum hv_tlb_flush_rings {
> + HV_L1_TLB_FLUSH_RING,
> + HV_L2_TLB_FLUSH_RING,
> + HV_NR_TLB_FLUSH_RINGS,
> +};
> +
> struct kvm_vcpu_hv_tlb_flush_entry {
> u64 addr;
> u64 flush_all:1;
> @@ -612,7 +618,7 @@ struct kvm_vcpu_hv {
> u32 syndbg_cap_eax; /* HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX */
> } cpuid_cache;
>
> - struct kvm_vcpu_hv_tlb_flush_ring tlb_flush_ring;
> + struct kvm_vcpu_hv_tlb_flush_ring tlb_flush_ring[HV_NR_TLB_FLUSH_RINGS];
> };
>
> /* Xen HVM per vcpu emulation context */
> diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
> index aebbb598ad1d..1cef2b8f7001 100644
> --- a/arch/x86/kvm/hyperv.c
> +++ b/arch/x86/kvm/hyperv.c
> @@ -956,7 +956,8 @@ static int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
>
> hv_vcpu->vp_index = vcpu->vcpu_idx;
>
> - spin_lock_init(&hv_vcpu->tlb_flush_ring.write_lock);
> + for (i = 0; i < HV_NR_TLB_FLUSH_RINGS; i++)
> + spin_lock_init(&hv_vcpu->tlb_flush_ring[i].write_lock);
>
> return 0;
> }
> @@ -1852,7 +1853,8 @@ static void hv_tlb_flush_ring_enqueue(struct kvm_vcpu *vcpu, u64 *entries, int c
> if (!hv_vcpu)
> return;
>
> - tlb_flush_ring = &hv_vcpu->tlb_flush_ring;
> + /* kvm_hv_flush_tlb() is not ready to handle requests for L2s yet */
> + tlb_flush_ring = &hv_vcpu->tlb_flush_ring[HV_L1_TLB_FLUSH_RING];
>
> spin_lock_irqsave(&tlb_flush_ring->write_lock, flags);
>
> @@ -1921,7 +1923,7 @@ void kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
> return;
> }
>
> - tlb_flush_ring = &hv_vcpu->tlb_flush_ring;
> + tlb_flush_ring = kvm_hv_get_tlb_flush_ring(vcpu);
>
> /*
> * TLB flush must be performed on the target vCPU so 'read_idx'
> diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
> index 6847caeaaf84..d59f96700104 100644
> --- a/arch/x86/kvm/hyperv.h
> +++ b/arch/x86/kvm/hyperv.h
> @@ -22,6 +22,7 @@
> #define __ARCH_X86_KVM_HYPERV_H__
>
> #include <linux/kvm_host.h>
> +#include "x86.h"
>
> /*
> * The #defines related to the synthetic debugger are required by KDNet, but
> @@ -147,15 +148,27 @@ int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args);
> int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
> struct kvm_cpuid_entry2 __user *entries);
>
> +static inline struct kvm_vcpu_hv_tlb_flush_ring *kvm_hv_get_tlb_flush_ring(struct kvm_vcpu *vcpu)
> +{
> + struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
> + int i = !is_guest_mode(vcpu) ? HV_L1_TLB_FLUSH_RING :
> + HV_L2_TLB_FLUSH_RING;
> +
> + /* KVM does not handle L2 TLB flush requests yet */
> + WARN_ON_ONCE(i != HV_L1_TLB_FLUSH_RING);
> +
> + return &hv_vcpu->tlb_flush_ring[i];
> +}
>
> static inline void kvm_hv_vcpu_empty_flush_tlb(struct kvm_vcpu *vcpu)
> {
> - struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
> + struct kvm_vcpu_hv_tlb_flush_ring *tlb_flush_ring;
>
> - if (!hv_vcpu)
> + if (!to_hv_vcpu(vcpu))
> return;
>
> - hv_vcpu->tlb_flush_ring.read_idx = hv_vcpu->tlb_flush_ring.write_idx;
> + tlb_flush_ring = kvm_hv_get_tlb_flush_ring(vcpu);
> + tlb_flush_ring->read_idx = tlb_flush_ring->write_idx;
> }
> void kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu);
>
Reviewed-by: Maxim Levitsky <mlevitsk@...hat.com>
Best regards,
Maxim Levitsky