Message-ID: <58bcee35-7573-7ea8-18f3-de741e3e7b9b@gmail.com>
Date: Tue, 24 Oct 2023 19:49:26 +0800
From: JinrongLiang <ljr.kernel@...il.com>
To: Sean Christopherson <seanjc@...gle.com>,
Paolo Bonzini <pbonzini@...hat.com>
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
Like Xu <likexu@...cent.com>,
Jinrong Liang <cloudliang@...cent.com>
Subject: Re: [PATCH v5 13/13] KVM: selftests: Extend PMU counters test to
permute on vPMU version

On 2023/10/24 08:26, Sean Christopherson wrote:
> Extend the PMU counters test to verify that KVM emulates the vPMU (or
> not) according to the vPMU version exposed to the guest. KVM's ABI (which
> does NOT reflect Intel's architectural behavior) is that GP counters are
> available if the PMU version is >0, and that fixed counters and
> PERF_GLOBAL_CTRL are available if the PMU version is >1.
>
> Test up to vPMU version 5, i.e. the current architectural max. KVM only
> officially supports up to version 2, but the behavior of the counters is
> backwards compatible, i.e. KVM shouldn't do something completely different
> for a higher, architecturally-defined vPMU version.
>
> Verify KVM behavior against the effective vPMU version, e.g. advertising
> vPMU 5 when KVM only supports vPMU 2 shouldn't magically unlock vPMU 5
> features.
>
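
Just to confirm my understanding of the ABI described above, in guest terms it
boils down to roughly the following (my reading of the changelog, not taken
from the patch; assumes the selftest's wrmsr() helper and the standard MSR
defines):

	uint8_t v = guest_get_pmu_version();

	/* GP counters are available for any non-zero effective vPMU version. */
	if (v > 0)
		wrmsr(MSR_IA32_PERFCTR0, 0);

	/* Fixed counters and PERF_GLOBAL_CTRL require effective version 2+. */
	if (v > 1) {
		wrmsr(MSR_CORE_PERF_FIXED_CTR0, 0);
		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
	}

i.e. the same accesses below those version thresholds are expected to #GP.
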
> Suggested-by: Like Xu <likexu@...cent.com>
> Suggested-by: Jinrong Liang <cloudliang@...cent.com>
> Signed-off-by: Sean Christopherson <seanjc@...gle.com>
> ---
> .../selftests/kvm/x86_64/pmu_counters_test.c | 60 +++++++++++++++----
> 1 file changed, 47 insertions(+), 13 deletions(-)
>
> diff --git a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
> index 1c392ad156f4..85b01dd5b2cd 100644
> --- a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
> +++ b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
> @@ -12,6 +12,8 @@
> /* Guest payload for any performance counter counting */
> #define NUM_BRANCHES 10
>
> +static uint8_t kvm_pmu_version;
> +
> static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
> void *guest_code)
> {
> @@ -21,6 +23,8 @@ static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
> vm_init_descriptor_tables(vm);
> vcpu_init_descriptor_tables(*vcpu);
>
> + sync_global_to_guest(vm, kvm_pmu_version);
> +
> return vm;
> }
>
> @@ -97,6 +101,19 @@ static bool pmu_is_null_feature(struct kvm_x86_pmu_feature event)
> return !(*(u64 *)&event);
> }
>
> +static uint8_t guest_get_pmu_version(void)
> +{
> + /*
> + * Return the effective PMU version, i.e. the minimum between what KVM
> + * supports and what is enumerated to the guest. The counters test
> + * deliberately advertises a PMU version to the guest beyond what is
> + * actually supported by KVM to verify KVM doesn't freak out and do
> + * something bizarre with an architecturally valid, but unsupported,
> + * version.
> + */
> + return min_t(uint8_t, kvm_pmu_version, this_cpu_property(X86_PROPERTY_PMU_VERSION));
> +}
> +
> static void guest_measure_loop(uint8_t idx)
> {
> const struct {
> @@ -121,7 +138,7 @@ static void guest_measure_loop(uint8_t idx)
> };
>
> uint32_t nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
> - uint32_t pmu_version = this_cpu_property(X86_PROPERTY_PMU_VERSION);
> + uint32_t pmu_version = guest_get_pmu_version();
> struct kvm_x86_pmu_feature gp_event, fixed_event;
> uint32_t counter_msr;
> unsigned int i;
> @@ -270,9 +287,12 @@ static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters
>
> static void guest_test_gp_counters(void)
> {
> - uint8_t nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
> + uint8_t nr_gp_counters = 0;
> uint32_t base_msr;
>
> + if (guest_get_pmu_version())
> + nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
> +
> if (rdmsr(MSR_IA32_PERF_CAPABILITIES) & PMU_CAP_FW_WRITES)
> base_msr = MSR_IA32_PMC0;
> else
> @@ -282,7 +302,8 @@ static void guest_test_gp_counters(void)
> GUEST_DONE();
> }
>
> -static void test_gp_counters(uint8_t nr_gp_counters, uint64_t perf_cap)
> +static void test_gp_counters(uint8_t pmu_version, uint8_t nr_gp_counters,
> +			     uint64_t perf_cap)

The pmu_version parameter is not used in this function; see the sketch below
this hunk.

> {
> struct kvm_vcpu *vcpu;
> struct kvm_vm *vm;
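
If the intent is for pmu_version to control what the guest actually sees, I'd
have expected this helper to consume it before running the vCPU, roughly like
so (untested sketch; assumes the vcpu_set_cpuid_property() helper added earlier
in this series):

	vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_test_gp_counters);

	/* Advertise the (possibly KVM-unsupported) vPMU version under test. */
	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_VERSION, pmu_version);

	vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, perf_cap);

Otherwise the new parameter can presumably be dropped here and in
test_fixed_counters().
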
> @@ -305,16 +326,17 @@ static void guest_test_fixed_counters(void)
> uint8_t i;
>
> /* KVM provides fixed counters iff the vPMU version is 2+. */
> - if (this_cpu_property(X86_PROPERTY_PMU_VERSION) >= 2)
> + if (guest_get_pmu_version() >= 2)
> nr_fixed_counters = this_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
>
> /*
> * The supported bitmask for fixed counters was introduced in PMU
> * version 5.
> */
> - if (this_cpu_property(X86_PROPERTY_PMU_VERSION) >= 5)
> + if (guest_get_pmu_version() >= 5)
> supported_bitmask = this_cpu_property(X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK);
>
> +
> guest_rd_wr_counters(MSR_CORE_PERF_FIXED_CTR0, MAX_NR_FIXED_COUNTERS,
> nr_fixed_counters, supported_bitmask);
>
> @@ -345,7 +367,7 @@ static void guest_test_fixed_counters(void)
> GUEST_DONE();
> }
>
> -static void test_fixed_counters(uint8_t nr_fixed_counters,
> +static void test_fixed_counters(uint8_t pmu_version, uint8_t nr_fixed_counters,
> 				uint32_t supported_bitmask, uint64_t perf_cap)

Same comment here: the pmu_version parameter is not used in this function
either.

> {
> struct kvm_vcpu *vcpu;
> @@ -368,22 +390,32 @@ static void test_intel_counters(void)
> {
> uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
> uint8_t nr_gp_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
> + uint8_t max_pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION);
> unsigned int i;
> + uint8_t j, v;
> uint32_t k;
> - uint8_t j;
>
> const uint64_t perf_caps[] = {
> 0,
> PMU_CAP_FW_WRITES,
> };
>
> - for (i = 0; i < ARRAY_SIZE(perf_caps); i++) {
> - for (j = 0; j <= nr_gp_counters; j++)
> - test_gp_counters(j, perf_caps[i]);
> + /*
> + * Test up to PMU v5, which is the current maximum version defined by
> + * Intel, i.e. is the last version that is guaranteed to be backwards
> + * compatible with KVM's existing behavior.
> + */
> + max_pmu_version = max_t(typeof(max_pmu_version), max_pmu_version, 5);
>
> - for (j = 0; j <= nr_fixed_counters; j++) {
> - for (k = 0; k <= (BIT(nr_fixed_counters) - 1); k++)
> - test_fixed_counters(j, k, perf_caps[i]);
> + for (v = 0; v <= max_pmu_version; v++) {
> + for (i = 0; i < ARRAY_SIZE(perf_caps) + 1; i++) {
> + for (j = 0; j <= nr_gp_counters; j++)
> + test_gp_counters(v, j, perf_caps[i]);
> +
> + for (j = 0; j <= nr_fixed_counters; j++) {
> + for (k = 0; k <= (BIT(nr_fixed_counters) - 1); k++)
> + test_fixed_counters(v, j, k, perf_caps[i]);
> + }
> }
> }
> }
> @@ -397,6 +429,8 @@ int main(int argc, char *argv[])
> TEST_REQUIRE(kvm_cpu_property(X86_PROPERTY_PMU_VERSION) > 0);
> TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_PDCM));
>
> + kvm_pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION);
> +
> test_intel_arch_events();
> test_intel_counters();
>