Message-ID: <CALMp9eT0SrXCLriBN+nBv5fFQQ3n+b4Guq=-yLsFFQjeQ-nczA@mail.gmail.com>
Date: Fri, 24 Mar 2023 16:19:51 -0700
From: Jim Mattson <jmattson@...gle.com>
To: Like Xu <like.xu.linux@...il.com>
Cc: Sean Christopherson <seanjc@...gle.com>,
Paolo Bonzini <pbonzini@...hat.com>, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH] KVM: x86/pmu: Add Intel PMU supported fixed counters bit mask
On Tue, Mar 21, 2023 at 4:28 AM Like Xu <like.xu.linux@...il.com> wrote:
>
> From: Like Xu <likexu@...cent.com>
>
> Per Intel SDM, fixed-function performance counter 'i' is supported if:
>
> FxCtr[i]_is_supported := ECX[i] || (EDX[4:0] > i);
>
> which means that KVM user space can use EDX to limit the number of
> fixed counters while also using ECX to enable a subset of the other
> KVM-supported fixed counters.
>
> Add a bitmap (instead of always checking the vCPU's CPUID entries) to
> keep track of the fixed counters available to the guest and to perform
> the semantic checks.
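
For reference, the check above reduces to something like the following
minimal C sketch (the helper name and standalone form are illustrative,
not code from this patch):

	/* CPUID.0AH: ECX is a bitmask of supported fixed counters,
	 * EDX[4:0] is the fixed-counter count. Per the SDM, counter i
	 * is supported if either field enumerates it.
	 */
	static bool fxctr_is_supported(u32 ecx, u32 edx, unsigned int i)
	{
		return (ecx & BIT(i)) || ((edx & 0x1f) > i);
	}
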
>
> Signed-off-by: Like Xu <likexu@...cent.com>
> ---
> arch/x86/include/asm/kvm_host.h | 2 ++
> arch/x86/kvm/pmu.h | 8 +++++
> arch/x86/kvm/vmx/pmu_intel.c | 53 +++++++++++++++++++++------------
> 3 files changed, 44 insertions(+), 19 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index a45de1118a42..14689e583127 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -565,6 +565,8 @@ struct kvm_pmu {
> */
> bool need_cleanup;
>
> + DECLARE_BITMAP(supported_fixed_pmc_idx, KVM_PMC_MAX_FIXED);
> +
> /*
> * The total number of programmed perf_events and it helps to avoid
> * redundant check before cleanup if guest don't use vPMU at all.
> diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
> index be62c16f2265..9f4504e5e9d5 100644
> --- a/arch/x86/kvm/pmu.h
> +++ b/arch/x86/kvm/pmu.h
> @@ -111,6 +111,11 @@ static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
> return NULL;
> }
>
> +static inline bool fixed_ctr_is_supported(struct kvm_pmu *pmu, unsigned int idx)
> +{
> + return test_bit(idx, pmu->supported_fixed_pmc_idx);
> +}
> +
> /* returns fixed PMC with the specified MSR */
> static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
> {
> @@ -120,6 +125,9 @@ static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
> u32 index = array_index_nospec(msr - base,
> pmu->nr_arch_fixed_counters);
>
> + if (!fixed_ctr_is_supported(pmu, index))
> + return NULL;
> +
> return &pmu->fixed_counters[index];
> }
>
> diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
> index e8a3be0b9df9..12f4b2fe7756 100644
> --- a/arch/x86/kvm/vmx/pmu_intel.c
> +++ b/arch/x86/kvm/vmx/pmu_intel.c
> @@ -43,13 +43,16 @@ static int fixed_pmc_events[] = {1, 0, 7};
> static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
> {
> struct kvm_pmc *pmc;
> - u8 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;
> + u8 new_ctrl, old_ctrl, old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;
> int i;
>
> pmu->fixed_ctr_ctrl = data;
> for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
> - u8 new_ctrl = fixed_ctrl_field(data, i);
> - u8 old_ctrl = fixed_ctrl_field(old_fixed_ctr_ctrl, i);
> + if (!fixed_ctr_is_supported(pmu, i))
> + continue;
> +
> + new_ctrl = fixed_ctrl_field(data, i);
> + old_ctrl = fixed_ctrl_field(old_fixed_ctr_ctrl, i);
>
> if (old_ctrl == new_ctrl)
> continue;
> @@ -125,6 +128,9 @@ static bool intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
>
> idx &= ~(3u << 30);
>
> + if (fixed && !fixed_ctr_is_supported(pmu, idx))
> + return false;
> +
> return fixed ? idx < pmu->nr_arch_fixed_counters
> : idx < pmu->nr_arch_gp_counters;
> }
> @@ -145,7 +151,7 @@ static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
> counters = pmu->gp_counters;
> num_counters = pmu->nr_arch_gp_counters;
> }
> - if (idx >= num_counters)
> + if (idx >= num_counters || (fixed && !fixed_ctr_is_supported(pmu, idx)))
> return NULL;
> *mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
> return &counters[array_index_nospec(idx, num_counters)];
> @@ -500,6 +506,9 @@ static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu)
> int i;
>
> for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
> + if (!fixed_ctr_is_supported(pmu, i))
> + continue;
> +
> pmc = &pmu->fixed_counters[i];
> event = fixed_pmc_events[array_index_nospec(i, size)];
> pmc->eventsel = (intel_arch_events[event].unit_mask << 8) |
> @@ -520,6 +529,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
>
> pmu->nr_arch_gp_counters = 0;
> pmu->nr_arch_fixed_counters = 0;
> + bitmap_zero(pmu->supported_fixed_pmc_idx, KVM_PMC_MAX_FIXED);
> pmu->counter_bitmask[KVM_PMC_GP] = 0;
> pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
> pmu->version = 0;
> @@ -551,13 +561,24 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
> pmu->available_event_types = ~entry->ebx &
> ((1ull << eax.split.mask_length) - 1);
>
> - if (pmu->version == 1) {
> - pmu->nr_arch_fixed_counters = 0;
> - } else {
> + counter_mask = ~(BIT_ULL(pmu->nr_arch_gp_counters) - 1);
> + bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
> +
> + if (pmu->version > 1) {
> pmu->nr_arch_fixed_counters =
> - min3(ARRAY_SIZE(fixed_pmc_events),
> - (size_t) edx.split.num_counters_fixed,
> - (size_t)kvm_pmu_cap.num_counters_fixed);
> + min_t(int, ARRAY_SIZE(fixed_pmc_events),
> + kvm_pmu_cap.num_counters_fixed);
> + for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
> + /* FxCtr[i]_is_supported := CPUID.0xA.ECX[i] || (EDX[4:0] > i) */
This is true only when pmu->version >= 5.
From the SDM, volume 3, section 20.2.5 Architectural Performance
Monitoring Version 5:
  With Architectural Performance Monitoring Version 5, register
  CPUID.0AH.ECX indicates Fixed Counter enumeration. It is a bit mask
  which enumerates the supported Fixed Counters in a processor. If bit
  'i' is set, it implies that Fixed Counter 'i' is supported. Software
  is recommended to use the following logic to check if a Fixed Counter
  is supported on a given processor:

    FxCtr[i]_is_supported := ECX[i] || (EDX[4:0] > i);
Prior to PMU version 5, all fixed counters from 0 through <number of
fixed counters - 1> are supported.
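
Concretely, the refresh logic would need a version gate along these
lines (a sketch only, reusing the variables from the hunk below; the
"supported" local is illustrative):

	/* ECX enumerates fixed counters only on PMU v5+; earlier
	 * versions support counters 0..EDX[4:0]-1 contiguously.
	 */
	if (pmu->version >= 5)
		supported = (entry->ecx & BIT_ULL(i)) ||
			    (edx.split.num_counters_fixed > i);
	else
		supported = edx.split.num_counters_fixed > i;
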
> + if (!(entry->ecx & BIT_ULL(i) ||
> + edx.split.num_counters_fixed > i))
> + continue;
> +
> + set_bit(i, pmu->supported_fixed_pmc_idx);
> + set_bit(INTEL_PMC_MAX_GENERIC + i, pmu->all_valid_pmc_idx);
> + pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4));
> + counter_mask &= ~BIT_ULL(INTEL_PMC_MAX_GENERIC + i);
> + }
> edx.split.bit_width_fixed = min_t(int, edx.split.bit_width_fixed,
> kvm_pmu_cap.bit_width_fixed);
> pmu->counter_bitmask[KVM_PMC_FIXED] =
> @@ -565,10 +586,6 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
> setup_fixed_pmc_eventsel(pmu);
> }
>
> - for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
> - pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4));
> - counter_mask = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
> - (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED));
> pmu->global_ctrl_mask = counter_mask;
> pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
> & ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
> @@ -585,11 +602,6 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
> pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
> }
>
> - bitmap_set(pmu->all_valid_pmc_idx,
> - 0, pmu->nr_arch_gp_counters);
> - bitmap_set(pmu->all_valid_pmc_idx,
> - INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
> -
> perf_capabilities = vcpu_get_perf_capabilities(vcpu);
> if (cpuid_model_is_consistent(vcpu) &&
> (perf_capabilities & PMU_CAP_LBR_FMT))
> @@ -605,6 +617,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
> pmu->pebs_enable_mask = counter_mask;
> pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
> for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
> + if (!fixed_ctr_is_supported(pmu, i))
> + continue;
> +
> pmu->fixed_ctr_ctrl_mask &=
> ~(1ULL << (INTEL_PMC_IDX_FIXED + i * 4));
> }
>
> base-commit: d8708b80fa0e6e21bc0c9e7276ad0bccef73b6e7
> --
> 2.40.0
>
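
As an aside, the user-space usage the changelog describes would look
roughly like this (hypothetical snippet, assuming an open vCPU fd and a
populated struct kvm_cpuid2 *cpuid; find_cpuid_entry() stands in for
however the VMM locates leaf 0xA in its entry array):

	struct kvm_cpuid_entry2 *e = find_cpuid_entry(cpuid, 0xa, 0);

	/* EDX[4:0] = 1 exposes fixed counter 0; ECX[2] additionally
	 * enables fixed counter 2, leaving counter 1 unsupported.
	 */
	e->edx = (e->edx & ~0x1fu) | 1;
	e->ecx = 1u << 2;
	ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);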