Message-ID: <ZTG8YdrL98MEYo-p@google.com>
Date: Thu, 19 Oct 2023 16:31:45 -0700
From: Sean Christopherson <seanjc@...gle.com>
To: Jinrong Liang <ljr.kernel@...il.com>
Cc: Paolo Bonzini <pbonzini@...hat.com>, Like Xu <likexu@...cent.com>,
David Matlack <dmatlack@...gle.com>,
Aaron Lewis <aaronlewis@...gle.com>,
Vitaly Kuznetsov <vkuznets@...hat.com>,
Wanpeng Li <wanpengli@...cent.com>,
Jinrong Liang <cloudliang@...cent.com>, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v4 2/9] KVM: selftests: Extend this_pmu_has() and
kvm_pmu_has() to check arch events
On Mon, Sep 11, 2023, Jinrong Liang wrote:
> From: Jinrong Liang <cloudliang@...cent.com>
>
> The kvm_x86_pmu_feature struct has been updated to use the more
> descriptive name "pmu_feature" instead of "anti_feature".
>
> Extend this_pmu_has() and kvm_pmu_has() functions to better support
> checking for Intel architectural events. Rename this_pmu_has() and
> kvm_pmu_has() to this_pmu_has_arch_event() and kvm_pmu_has_arch_event().
>
> Suggested-by: Sean Christopherson <seanjc@...gle.com>
> Signed-off-by: Jinrong Liang <cloudliang@...cent.com>
> ---
>  .../selftests/kvm/include/x86_64/processor.h | 38 ++++++++++++++-----
>  .../kvm/x86_64/pmu_event_filter_test.c        |  2 +-
>  2 files changed, 29 insertions(+), 11 deletions(-)
>
> diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
> index 6b146e1c6736..ede433eb6541 100644
> --- a/tools/testing/selftests/kvm/include/x86_64/processor.h
> +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
> @@ -280,12 +280,12 @@ struct kvm_x86_cpu_property {
> * architectural event is supported.
> */
> struct kvm_x86_pmu_feature {
> -	struct kvm_x86_cpu_feature anti_feature;
> +	struct kvm_x86_cpu_feature pmu_feature;
Eh, looking at this with fresh eyes, let's just use a single character to keep
the line lengths as short as possible. There was value in the anti_feature name,
but pmu_feature doesn't add anything IMO.
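I.e. something like this (untested sketch), which also keeps the member accesses
nice and short:

	struct kvm_x86_pmu_feature {
		struct kvm_x86_cpu_feature f;
	};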
> };
> #define KVM_X86_PMU_FEATURE(name, __bit) \
> ({ \
> 	struct kvm_x86_pmu_feature feature = { \
> -		.anti_feature = KVM_X86_CPU_FEATURE(0xa, 0, EBX, __bit), \
> +		.pmu_feature = KVM_X86_CPU_FEATURE(0xa, 0, EBX, __bit), \
This needs to take in the register (EBX vs. ECX) for this helper to be useful.
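E.g. an untested sketch, using the single-character member from above (exact
parameter list is up for grabs):

	#define KVM_X86_PMU_FEATURE(__reg, __bit)				\
	({									\
		struct kvm_x86_pmu_feature feature = {				\
			.f = KVM_X86_CPU_FEATURE(0xa, 0, __reg, __bit),		\
		};								\
										\
		feature;							\
	})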
> 	}; \
> 	\
> 	feature; \
> @@ -681,12 +681,21 @@ static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property)
> 	return max_leaf >= property.function;
> }
>
> -static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature)
> +static inline bool this_pmu_has_arch_event(struct kvm_x86_pmu_feature feature)
Why? I don't see the point. And it's confusing for fixed counters. Yeah, fixed
counters count architectural events, but the code is asking if a _counter_ is
supported, not if the associated event is supported. And the darn name gets too
long, too.
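E.g. a hypothetical caller (made-up example, assuming the macro takes the
register as sketched above) is asking whether the _counter_ exists:

	/* Is fixed counter 0 (CPUID.0xA.ECX bit 0) supported on this vCPU? */
	bool has_fixed_ctr0 = this_pmu_has(KVM_X86_PMU_FEATURE(ECX, 0));

which reads just fine with the existing this_pmu_has() name.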
> {
> -	uint32_t nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
> +	uint32_t nr_bits;
>
> -	return nr_bits > feature.anti_feature.bit &&
> -	       !this_cpu_has(feature.anti_feature);
> +	if (feature.pmu_feature.reg == KVM_CPUID_EBX) {
> +		nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
> +		return nr_bits > feature.pmu_feature.bit &&
> +		       !this_cpu_has(feature.pmu_feature);
> +	} else if (feature.pmu_feature.reg == KVM_CPUID_ECX) {
> +		nr_bits = this_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
> +		return nr_bits > feature.pmu_feature.bit ||
> +		       this_cpu_has(feature.pmu_feature);
> +	} else {
> +		TEST_FAIL("Invalid register in kvm_x86_pmu_feature");
This needs to be a GUEST_ASSERT(), as the primary usage is in the guest.
And again looking at this with fresh eyes, I'd rather do
	uint32_t nr_bits;

	if (feature.f.reg == KVM_CPUID_EBX) {
		nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
		return nr_bits > feature.f.bit && !this_cpu_has(feature.f);
	}

	GUEST_ASSERT(feature.f.reg == KVM_CPUID_ECX);
	nr_bits = this_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
	return nr_bits > feature.f.bit || this_cpu_has(feature.f);
so that the bogus register is printed out on failure.
> +	}
> }
>
> static __always_inline uint64_t this_cpu_supported_xcr0(void)
> @@ -900,12 +909,21 @@ static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property)
> 	return max_leaf >= property.function;
> }
>
> -static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature)
> +static inline bool kvm_pmu_has_arch_event(struct kvm_x86_pmu_feature feature)
> {
> -	uint32_t nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
> +	uint32_t nr_bits;
>
> -	return nr_bits > feature.anti_feature.bit &&
> -	       !kvm_cpu_has(feature.anti_feature);
> +	if (feature.pmu_feature.reg == KVM_CPUID_EBX) {
> +		nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
> +		return nr_bits > feature.pmu_feature.bit &&
> +		       !kvm_cpu_has(feature.pmu_feature);
> +	} else if (feature.pmu_feature.reg == KVM_CPUID_ECX) {
> +		nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
> +		return nr_bits > feature.pmu_feature.bit ||
> +		       kvm_cpu_has(feature.pmu_feature);
> +	} else {
> +		TEST_FAIL("Invalid register in kvm_x86_pmu_feature");
Same thing here.
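I.e. the same structure as the this_pmu_has() version, just with the host-side
helpers, e.g. (untested; TEST_ASSERT() instead of GUEST_ASSERT() since this one
runs in the host):

	uint32_t nr_bits;

	if (feature.f.reg == KVM_CPUID_EBX) {
		nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
		return nr_bits > feature.f.bit && !kvm_cpu_has(feature.f);
	}

	TEST_ASSERT(feature.f.reg == KVM_CPUID_ECX,
		    "Unexpected register in kvm_x86_pmu_feature: %u", feature.f.reg);
	nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
	return nr_bits > feature.f.bit || kvm_cpu_has(feature.f);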
> +	}