[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20230815000709.GF2257301@ls.amr.corp.intel.com>
Date: Mon, 14 Aug 2023 17:07:09 -0700
From: Isaku Yamahata <isaku.yamahata@...il.com>
To: Jinrong Liang <ljr.kernel@...il.com>
Cc: Sean Christopherson <seanjc@...gle.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Isaku Yamahata <isaku.yamahata@...el.com>,
Jim Mattson <jmattson@...gle.com>,
Shuah Khan <shuah@...nel.org>,
Aaron Lewis <aaronlewis@...gle.com>,
David Matlack <dmatlack@...gle.com>,
Vishal Annapurve <vannapurve@...gle.com>,
Wanpeng Li <wanpengli@...cent.com>,
Like Xu <like.xu.linux@...il.com>,
Jinrong Liang <cloudliang@...cent.com>,
linux-kselftest@...r.kernel.org, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org, isaku.yamahata@...il.com
Subject: Re: [PATCH v6 5/6] KVM: selftests: Test if event filter meets
expectations on fixed counters
On Thu, Aug 10, 2023 at 05:09:44PM +0800,
Jinrong Liang <ljr.kernel@...il.com> wrote:
> From: Jinrong Liang <cloudliang@...cent.com>
>
> Add tests to cover that pmu event_filter works as expected when it's
> applied to fixed performance counters, even if no fixed counters
> exist (e.g. Intel guest pmu version=1 or AMD guest).
>
> Signed-off-by: Jinrong Liang <cloudliang@...cent.com>
> ---
> .../kvm/x86_64/pmu_event_filter_test.c | 80 +++++++++++++++++++
> 1 file changed, 80 insertions(+)
>
> diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> index 8b8bfee11016..732c76c41bb0 100644
> --- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> +++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> @@ -27,6 +27,7 @@
> #define ARCH_PERFMON_BRANCHES_RETIRED 5
>
> #define NUM_BRANCHES 42
> +#define INTEL_PMC_IDX_FIXED 32
>
> /* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
> #define MAX_FILTER_EVENTS 300
> @@ -808,6 +809,84 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu)
> TEST_ASSERT(!r, "Masking non-existent fixed counters should be allowed");
> }
>
> +static void intel_run_fixed_counter_guest_code(uint8_t fixed_ctr_idx)
> +{
> + for (;;) {
> + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
> + wrmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx, 0);
> +
> + /* Only OS_EN bit is enabled for fixed counter[idx]. */
> + wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, BIT_ULL(4 * fixed_ctr_idx));
> + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL,
> + BIT_ULL(INTEL_PMC_IDX_FIXED + fixed_ctr_idx));
> + __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
> + wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
> +
> + GUEST_SYNC(rdmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx));
> + }
> +}
> +
> +static uint64_t test_with_fixed_counter_filter(struct kvm_vcpu *vcpu,
> + uint32_t action, uint32_t bitmap)
> +{
> + struct __kvm_pmu_event_filter f = {
> + .action = action,
> + .fixed_counter_bitmap = bitmap,
> + };
> + do_vcpu_set_pmu_event_filter(vcpu, &f);
> +
> + return run_vcpu_to_sync(vcpu);
> +}
> +
> +static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, uint8_t idx,
> + uint8_t nr_fixed_counters)
> +{
> + unsigned int i;
> + uint32_t bitmap;
> + uint64_t count;
> +
> + TEST_ASSERT(nr_fixed_counters < sizeof(bitmap) * 8,
> + "Invalid nr_fixed_counters");
> +
> + /*
> + * Check the fixed performance counter can count normally when KVM
> + * userspace doesn't set any pmu filter.
> + */
> + count = run_vcpu_to_sync(vcpu);
> + TEST_ASSERT(count, "Unexpected count value: %ld\n", count);
> +
> + for (i = 0; i < BIT(nr_fixed_counters); i++) {
> + bitmap = BIT(i);
> + count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_ALLOW,
> + bitmap);
> + TEST_ASSERT_EQ(!!count, !!(bitmap & BIT(idx)));
> +
> + count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_DENY,
> + bitmap);
> + TEST_ASSERT_EQ(!!count, !(bitmap & BIT(idx)));
> + }
> +}
> +
> +static void test_fixed_counter_bitmap(void)
> +{
> + uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
> + struct kvm_vm *vm;
> + struct kvm_vcpu *vcpu;
> + uint8_t idx;
> +
> + /*
> + * Check that pmu_event_filter works as expected when it's applied to
> + * fixed performance counters.
> + */
> + for (idx = 0; idx < nr_fixed_counters; idx++) {
> + vm = vm_create_with_one_vcpu(&vcpu,
> + intel_run_fixed_counter_guest_code);
> + vcpu_args_set(vcpu, 1, idx);
> + __test_fixed_counter_bitmap(vcpu, idx, nr_fixed_counters);
> + kvm_vm_free(vm);
> + }
> +}
> +
> int main(int argc, char *argv[])
> {
> void (*guest_code)(void);
> @@ -851,6 +930,7 @@ int main(int argc, char *argv[])
> kvm_vm_free(vm);
>
> test_pmu_config_disable(guest_code);
> + test_fixed_counter_bitmap();
>
> return 0;
> }
> --
> 2.39.3
>
Reviewed-by: Isaku Yamahata <isaku.yamahata@...el.com>
--
Isaku Yamahata <isaku.yamahata@...il.com>
Powered by blists - more mailing lists