Message-Id: <20230911114347.85882-3-cloudliang@tencent.com>
Date:   Mon, 11 Sep 2023 19:43:40 +0800
From:   Jinrong Liang <ljr.kernel@...il.com>
To:     Sean Christopherson <seanjc@...gle.com>
Cc:     Paolo Bonzini <pbonzini@...hat.com>, Like Xu <likexu@...cent.com>,
        David Matlack <dmatlack@...gle.com>,
        Aaron Lewis <aaronlewis@...gle.com>,
        Vitaly Kuznetsov <vkuznets@...hat.com>,
        Wanpeng Li <wanpengli@...cent.com>,
        Jinrong Liang <cloudliang@...cent.com>, kvm@...r.kernel.org,
        linux-kernel@...r.kernel.org
Subject: [PATCH v4 2/9] KVM: selftests: Extend this_pmu_has() and kvm_pmu_has() to check arch events

From: Jinrong Liang <cloudliang@...cent.com>

Rename the "anti_feature" field of struct kvm_x86_pmu_feature to the more
descriptive "pmu_feature", as the struct is no longer limited to the
CPUID.0xA.EBX bits, which enumerate architectural events as *unavailable*.

Extend this_pmu_has() and kvm_pmu_has() so that an Intel architectural
event can be checked via either the CPUID.0xA.EBX event-availability bits
or the CPUID.0xA.ECX fixed counter bitmask, and rename them to
this_pmu_has_arch_event() and kvm_pmu_has_arch_event() to reflect the
expanded scope.
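
For example, with a hypothetical fixed counter feature defined against
CPUID.0xA.ECX (the feature name below is illustrative only and is not
added by this patch), both flavors can be queried through the same
helper:

  #include "test_util.h"
  #include "processor.h"

  static void report_pmu_support(void)
  {
  	/* Architectural event, enumerated in CPUID.0xA.EBX. */
  	if (kvm_pmu_has_arch_event(X86_PMU_FEATURE_BRANCH_INSNS_RETIRED))
  		pr_info("KVM exposes the branches-retired arch event\n");

  	/*
  	 * Hypothetical feature keyed to CPUID.0xA.ECX: reported as
  	 * supported if the fixed counter exists (bit index is below the
  	 * number of fixed counters) or its bit is set in the fixed
  	 * counter bitmask.
  	 */
  	if (kvm_pmu_has_arch_event(X86_PMU_FEATURE_INSNS_RETIRED_FIXED))
  		pr_info("KVM exposes a fixed instructions-retired counter\n");
  }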

Suggested-by: Sean Christopherson <seanjc@...gle.com>
Signed-off-by: Jinrong Liang <cloudliang@...cent.com>
---
 .../selftests/kvm/include/x86_64/processor.h  | 38 ++++++++++++++-----
 .../kvm/x86_64/pmu_event_filter_test.c        |  2 +-
 2 files changed, 29 insertions(+), 11 deletions(-)

diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 6b146e1c6736..ede433eb6541 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -280,12 +280,12 @@ struct kvm_x86_cpu_property {
  * architectural event is supported.
  */
 struct kvm_x86_pmu_feature {
-	struct kvm_x86_cpu_feature anti_feature;
+	struct kvm_x86_cpu_feature pmu_feature;
 };
 #define	KVM_X86_PMU_FEATURE(name, __bit)					\
 ({										\
 	struct kvm_x86_pmu_feature feature = {					\
-		.anti_feature = KVM_X86_CPU_FEATURE(0xa, 0, EBX, __bit),	\
+		.pmu_feature = KVM_X86_CPU_FEATURE(0xa, 0, EBX, __bit),		\
 	};									\
 										\
 	feature;								\
@@ -681,12 +681,21 @@ static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property)
 	return max_leaf >= property.function;
 }
 
-static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature)
+static inline bool this_pmu_has_arch_event(struct kvm_x86_pmu_feature feature)
 {
-	uint32_t nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
+	uint32_t nr_bits;
 
-	return nr_bits > feature.anti_feature.bit &&
-	       !this_cpu_has(feature.anti_feature);
+	if (feature.pmu_feature.reg == KVM_CPUID_EBX) {
+		nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
+		return nr_bits > feature.pmu_feature.bit &&
+			!this_cpu_has(feature.pmu_feature);
+	} else if (feature.pmu_feature.reg == KVM_CPUID_ECX) {
+		nr_bits = this_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
+		return nr_bits > feature.pmu_feature.bit ||
+			this_cpu_has(feature.pmu_feature);
+	} else {
+		TEST_FAIL("Invalid register in kvm_x86_pmu_feature");
+	}
 }
 
 static __always_inline uint64_t this_cpu_supported_xcr0(void)
@@ -900,12 +909,21 @@ static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property)
 	return max_leaf >= property.function;
 }
 
-static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature)
+static inline bool kvm_pmu_has_arch_event(struct kvm_x86_pmu_feature feature)
 {
-	uint32_t nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
+	uint32_t nr_bits;
 
-	return nr_bits > feature.anti_feature.bit &&
-	       !kvm_cpu_has(feature.anti_feature);
+	if (feature.pmu_feature.reg == KVM_CPUID_EBX) {
+		nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
+		return nr_bits > feature.pmu_feature.bit &&
+			!kvm_cpu_has(feature.pmu_feature);
+	} else if (feature.pmu_feature.reg == KVM_CPUID_ECX) {
+		nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
+		return nr_bits > feature.pmu_feature.bit ||
+			kvm_cpu_has(feature.pmu_feature);
+	} else {
+		TEST_FAIL("Invalid register in kvm_x86_pmu_feature");
+	}
 }
 
 static inline size_t kvm_cpuid2_size(int nr_entries)
diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index 283cc55597a4..b0b91e6e79fb 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -408,7 +408,7 @@ static bool use_intel_pmu(void)
 	return host_cpu_is_intel &&
 	       kvm_cpu_property(X86_PROPERTY_PMU_VERSION) &&
 	       kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS) &&
-	       kvm_pmu_has(X86_PMU_FEATURE_BRANCH_INSNS_RETIRED);
+	       kvm_pmu_has_arch_event(X86_PMU_FEATURE_BRANCH_INSNS_RETIRED);
 }
 
 static bool is_zen1(uint32_t family, uint32_t model)
-- 
2.39.3
