Message-ID: <871r40kwvj.fsf@vitty.brq.redhat.com>
Date: Mon, 01 Nov 2021 11:12:32 +0100
From: Vitaly Kuznetsov <vkuznets@...hat.com>
To: Sean Christopherson <seanjc@...gle.com>
Cc: Wanpeng Li <wanpengli@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Joerg Roedel <joro@...tes.org>, kvm@...r.kernel.org,
linux-hyperv@...r.kernel.org, linux-arch@...r.kernel.org,
linux-kernel@...r.kernel.org, Ajay Garg <ajaygargnsit@...il.com>,
Paolo Bonzini <pbonzini@...hat.com>,
"K. Y. Srinivasan" <kys@...rosoft.com>,
Haiyang Zhang <haiyangz@...rosoft.com>,
Stephen Hemminger <sthemmin@...rosoft.com>,
Wei Liu <wei.liu@...nel.org>, Dexuan Cui <decui@...rosoft.com>,
Arnd Bergmann <arnd@...db.de>
Subject: Re: [PATCH v2 6/8] KVM: x86: Shove vp_bitmap handling down into
sparse_set_to_vcpu_mask()

Sean Christopherson <seanjc@...gle.com> writes:

> Move the vp_bitmap "allocation" that's needed to handle mismatched vp_index
> values down into sparse_set_to_vcpu_mask() and drop __always_inline from
> said helper. The need for an intermediate vp_bitmap is a detail that's
> specific to the sparse translation with mismatched VP<=>vCPU indexes and
> does not need to be exposed to the caller.
>
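(Side note for anyone following along without the Hyper-V code in front of
them: the new flow boils down to roughly the toy sketch below. Userspace
stand-in types and sizes rather than the real KVM structs, purely to
illustrate "fill the caller's mask directly when vp_index == vcpu_idx,
otherwise fill a temporary VP bitmap and translate". The real code also has
a BUILD_BUG_ON guaranteeing the VP bitmap is not larger than the caller's
mask, which is what makes the aliasing safe.)

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define MAX_VCPUS  64                /* toy stand-in for KVM_MAX_VCPUS */
#define MAX_BANKS  (MAX_VCPUS / 64)  /* toy stand-in for KVM_HV_MAX_SPARSE_VCPU_SET_BITS */

struct toy_vm {
	int nr_vcpus;
	int vp_index[MAX_VCPUS];     /* Hyper-V VP index of each vCPU */
	bool has_mismatch;           /* any vCPU with vp_index != vcpu idx? */
};

void toy_sparse_set_to_vcpu_mask(struct toy_vm *vm, const uint64_t *sparse_banks,
				 uint64_t valid_bank_mask, uint64_t *vcpu_mask)
{
	uint64_t vp_bitmap[MAX_BANKS];
	uint64_t *bitmap;
	int bank, sbank = 0, i;

	/* No mismatch: vp_index == vcpu idx, so fill the caller's mask directly. */
	bitmap = vm->has_mismatch ? vp_bitmap : vcpu_mask;

	memset(bitmap, 0, sizeof(vp_bitmap));
	for (bank = 0; bank < MAX_BANKS; bank++)
		if (valid_bank_mask & (1ull << bank))
			bitmap[bank] = sparse_banks[sbank++];

	if (!vm->has_mismatch)
		return;

	/* Mismatch: translate "this VP index is set" into "this vCPU is set". */
	memset(vcpu_mask, 0, MAX_BANKS * sizeof(*vcpu_mask));
	for (i = 0; i < vm->nr_vcpus; i++) {
		int vp = vm->vp_index[i];

		if (vp_bitmap[vp / 64] & (1ull << (vp % 64)))
			vcpu_mask[i / 64] |= 1ull << (i % 64);
	}
}
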
> Regarding the __always_inline, prior to commit f21dd494506a ("KVM: x86:
> hyperv: optimize sparse VP set processing") the helper, then named
> hv_vcpu_in_sparse_set(), was a tiny bit of code that effectively boiled
> down to a handful of bit ops. The __always_inline was understandable, if
> not justifiable. Since the aforementioned change, sparse_set_to_vcpu_mask()
> is a chunky 350-450+ bytes of code without KASAN=y, and balloons to 1100+
> with KASAN=y. In other words, it has no business being forcefully inlined.
>
> Signed-off-by: Sean Christopherson <seanjc@...gle.com>
> ---
> arch/x86/kvm/hyperv.c | 65 +++++++++++++++++++++++++------------------
> 1 file changed, 38 insertions(+), 27 deletions(-)
>
> diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
> index 8832727d74d9..3d83d6a5d337 100644
> --- a/arch/x86/kvm/hyperv.c
> +++ b/arch/x86/kvm/hyperv.c
> @@ -1710,31 +1710,46 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
> return kvm_hv_get_msr(vcpu, msr, pdata, host);
> }
>
> -static __always_inline unsigned long *sparse_set_to_vcpu_mask(
> - struct kvm *kvm, u64 *sparse_banks, u64 valid_bank_mask,
> - u64 *vp_bitmap, unsigned long *vcpu_bitmap)
> +static void sparse_set_to_vcpu_mask(struct kvm *kvm, u64 *sparse_banks,
> + u64 valid_bank_mask, unsigned long *vcpu_mask)
> {
> struct kvm_hv *hv = to_kvm_hv(kvm);
> + bool has_mismatch = atomic_read(&hv->num_mismatched_vp_indexes);
> + u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
> struct kvm_vcpu *vcpu;
> int i, bank, sbank = 0;
> + u64 *bitmap;
>
> - memset(vp_bitmap, 0,
> - KVM_HV_MAX_SPARSE_VCPU_SET_BITS * sizeof(*vp_bitmap));
> + BUILD_BUG_ON(sizeof(vp_bitmap) >
> + sizeof(*vcpu_mask) * BITS_TO_LONGS(KVM_MAX_VCPUS));
> +
> + /*
> + * If vp_index == vcpu_idx for all vCPUs, fill vcpu_mask directly, else
> + * fill a temporary buffer and manually test each vCPU's VP index.
> + */
> + if (likely(!has_mismatch))
> + bitmap = (u64 *)vcpu_mask;
> + else
> + bitmap = vp_bitmap;
> +
> + /*
> + * Each set of 64 VPs is packed into sparse_banks, with valid_bank_amsk
Nit: 'valid_bank_amsk' -> 'valid_bank_mask'
> + * having a '1' for each bank that exits in sparse_banks. Sets must be
Nit: 'exits' -> 'exists'
> + * in ascending order, i.e. bank0..bankN.
> + */
> + memset(bitmap, 0, sizeof(vp_bitmap));
> for_each_set_bit(bank, (unsigned long *)&valid_bank_mask,
> KVM_HV_MAX_SPARSE_VCPU_SET_BITS)
> - vp_bitmap[bank] = sparse_banks[sbank++];
> + bitmap[bank] = sparse_banks[sbank++];
>
> - if (likely(!atomic_read(&hv->num_mismatched_vp_indexes))) {
> - /* for all vcpus vp_index == vcpu_idx */
> - return (unsigned long *)vp_bitmap;
> - }
> + if (likely(!has_mismatch))
> + return;
>
> - bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
> + bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
> kvm_for_each_vcpu(i, vcpu, kvm) {
> if (test_bit(kvm_hv_get_vpindex(vcpu), (unsigned long *)vp_bitmap))
> - __set_bit(i, vcpu_bitmap);
> + __set_bit(i, vcpu_mask);
> }
> - return vcpu_bitmap;
> }
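
To make the packing comment above concrete, here is a tiny compile-and-run
illustration with made-up values (bank width and count simplified, not the
actual hypercall layout constants):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical hypercall input: banks 0 and 2 are present. */
	uint64_t valid_bank_mask = 0x5;
	uint64_t sparse_banks[] = { 0x00f0, 0x0001 };  /* ascending bank order */
	uint64_t bitmap[3] = { 0 };
	int bank, sbank = 0;

	for (bank = 0; bank < 3; bank++)
		if (valid_bank_mask & (1ull << bank))
			bitmap[bank] = sparse_banks[sbank++];

	assert(bitmap[0] == 0x00f0);  /* VPs 4-7 */
	assert(bitmap[1] == 0);       /* bank 1 is not in the set */
	assert(bitmap[2] == 0x0001);  /* VP 128 (bank 2, bit 0) */
	return 0;
}
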
>
> struct kvm_hv_hcall {
> @@ -1771,9 +1786,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
> struct kvm *kvm = vcpu->kvm;
> struct hv_tlb_flush_ex flush_ex;
> struct hv_tlb_flush flush;
> - u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
> - DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
> - unsigned long *vcpu_mask;
> + DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
> u64 valid_bank_mask;
> u64 sparse_banks[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
> bool all_cpus;
> @@ -1858,11 +1871,9 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
> if (all_cpus) {
> kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH_GUEST);
> } else {
> - vcpu_mask = sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
> - vp_bitmap, vcpu_bitmap);
> + sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, vcpu_mask);
>
> - kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST,
> - vcpu_mask);
> + kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST, vcpu_mask);
> }
>
> ret_success:
> @@ -1895,9 +1906,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
> struct kvm *kvm = vcpu->kvm;
> struct hv_send_ipi_ex send_ipi_ex;
> struct hv_send_ipi send_ipi;
> - u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
> - DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
> - unsigned long *vcpu_mask;
> + DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
> unsigned long valid_bank_mask;
> u64 sparse_banks[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
> u32 vector;
> @@ -1953,11 +1962,13 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
> if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
> return HV_STATUS_INVALID_HYPERCALL_INPUT;
>
> - vcpu_mask = all_cpus ? NULL :
> - sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
> - vp_bitmap, vcpu_bitmap);
> + if (all_cpus) {
> + kvm_send_ipi_to_many(kvm, vector, NULL);
> + } else {
> + sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, vcpu_mask);
>
> - kvm_send_ipi_to_many(kvm, vector, vcpu_mask);
> + kvm_send_ipi_to_many(kvm, vector, vcpu_mask);
> + }
>
> ret_success:
> return HV_STATUS_SUCCESS;
With the nitpicks above addressed,
Reviewed-by: Vitaly Kuznetsov <vkuznets@...hat.com>
--
Vitaly