Message-ID: <20231104000239.367005-3-seanjc@google.com>
Date: Fri, 3 Nov 2023 17:02:20 -0700
From: Sean Christopherson <seanjc@...gle.com>
To: Sean Christopherson <seanjc@...gle.com>,
Paolo Bonzini <pbonzini@...hat.com>
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
Kan Liang <kan.liang@...ux.intel.com>,
Dapeng Mi <dapeng1.mi@...ux.intel.com>,
Jinrong Liang <cloudliang@...cent.com>,
Like Xu <likexu@...cent.com>,
Jim Mattson <jmattson@...gle.com>,
Aaron Lewis <aaronlewis@...gle.com>
Subject: [PATCH v6 02/20] KVM: x86/pmu: Don't enumerate support for fixed
counters KVM can't virtualize

Hide fixed counters for which perf is incapable of creating the associated
architectural event.  Except for the so-called pseudo-architectural event
for counting TSC reference cycles, KVM virtualizes fixed counters by
creating a perf event for the associated general purpose architectural
event.  If the associated event isn't supported in hardware, KVM can't
actually virtualize the fixed counter, because perf will likely not
program the correct event.

Note, this issue is almost certainly limited to running KVM on a funky
virtual CPU model; no known real hardware has an asymmetric PMU where a
fixed counter is supported but the associated architectural event is not.
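
For illustration only (not part of this patch): a minimal guest-side
sketch, assuming an x86 GCC/Clang toolchain, of how the trimmed count is
observable via CPUID leaf 0xA, whose layout per the Intel SDM puts the
number of fixed counters in EDX[4:0] and a mask of *unavailable*
architectural events in EBX:

  #include <stdio.h>
  #include <cpuid.h>	/* GCC/Clang wrapper for the CPUID instruction */

  int main(void)
  {
  	unsigned int eax, ebx, ecx, edx;

  	/* Leaf 0xA enumerates the architectural PMU. */
  	if (!__get_cpuid(0xa, &eax, &ebx, &ecx, &edx))
  		return 1;

  	/*
  	 * With this patch, the fixed counter count a guest sees is
  	 * already trimmed to exclude counters whose architectural
  	 * event perf can't program.
  	 */
  	printf("PMU version:    %u\n", eax & 0xff);
  	printf("fixed counters: %u\n", edx & 0x1f);
  	printf("unavailable arch events mask: 0x%x\n", ebx);
  	return 0;
  }
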
Fixes: f5132b01386b ("KVM: Expose a version 2 architectural PMU to a guests")
Signed-off-by: Sean Christopherson <seanjc@...gle.com>
---
 arch/x86/kvm/pmu.h           |  4 ++++
 arch/x86/kvm/vmx/pmu_intel.c | 31 +++++++++++++++++++++++++++++++
 2 files changed, 35 insertions(+)

diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 1d64113de488..5341e8f69a22 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -19,6 +19,7 @@
 #define VMWARE_BACKDOOR_PMC_APPARENT_TIME	0x10002

 struct kvm_pmu_ops {
+	void (*init_pmu_capability)(void);
 	bool (*hw_event_available)(struct kvm_pmc *pmc);
 	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
 	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
@@ -218,6 +219,9 @@ static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
 					     pmu_ops->MAX_NR_GP_COUNTERS);
 	kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
 					     KVM_PMC_MAX_FIXED);
+
+	if (pmu_ops->init_pmu_capability)
+		pmu_ops->init_pmu_capability();
 }

 static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)

diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 1b13a472e3f2..3316fdea212a 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -68,6 +68,36 @@ static int fixed_pmc_events[] = {
 	[2] = PSEUDO_ARCH_REFERENCE_CYCLES,
 };

+static void intel_init_pmu_capability(void)
+{
+	int i;
+
+	/*
+	 * Perf may (sadly) back a guest fixed counter with a general purpose
+	 * counter, and so KVM must hide fixed counters whose associated
+	 * architectural events are unsupported.  On real hardware, this
+	 * should never happen, but if KVM is running on a funky virtual CPU
+	 * model...
+	 *
+	 * TODO: Drop this horror if/when KVM stops using perf events for
+	 * guest fixed counters, or can explicitly request fixed counters.
+	 */
+	for (i = 0; i < kvm_pmu_cap.num_counters_fixed; i++) {
+		int event = fixed_pmc_events[i];
+
+		/*
+		 * Ignore pseudo-architectural events, they're a bizarre way
+		 * of requesting events from perf that _can't_ be backed with
+		 * a general purpose architectural event, i.e. they're
+		 * guaranteed to be backed by the real fixed counter.
+		 */
+		if (event < NR_REAL_INTEL_ARCH_EVENTS &&
+		    (kvm_pmu_cap.events_mask & BIT(event)))
+			break;
+	}
+
+	kvm_pmu_cap.num_counters_fixed = i;
+}
+
 static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
 {
 	struct kvm_pmc *pmc;
@@ -789,6 +819,7 @@ void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
 }

 struct kvm_pmu_ops intel_pmu_ops __initdata = {
+	.init_pmu_capability = intel_init_pmu_capability,
 	.hw_event_available = intel_hw_event_available,
 	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
 	.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
--
2.42.0.869.gea05f2083d-goog
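
Appended for illustration (not part of the patch): a self-contained
sketch of the pruning loop above, with hypothetical event indices and an
events_mask standing in for the kernel's kvm_pmu_cap, showing why the
loop truncates at the first unsupported event (fixed counters are
enumerated to the guest as a count, so only trailing counters can be
hidden):

  #include <stdio.h>

  #define BIT(n)			(1u << (n))

  /* Hypothetical stand-ins for the kernel's definitions. */
  #define NR_REAL_INTEL_ARCH_EVENTS	8
  enum {
  	UNHALTED_CORE_CYCLES = 0,
  	INSTRUCTIONS_RETIRED = 1,
  	PSEUDO_ARCH_REFERENCE_CYCLES = 100,	/* not a real arch event */
  };

  static const int fixed_pmc_events[] = {
  	INSTRUCTIONS_RETIRED,			/* fixed counter 0 */
  	UNHALTED_CORE_CYCLES,			/* fixed counter 1 */
  	PSEUDO_ARCH_REFERENCE_CYCLES,		/* fixed counter 2 */
  };

  int main(void)
  {
  	/* Pretend perf reports core cycles as unsupported. */
  	unsigned int events_mask = BIT(UNHALTED_CORE_CYCLES);
  	int num_fixed = 3, i;

  	for (i = 0; i < num_fixed; i++) {
  		int event = fixed_pmc_events[i];

  		/* Pseudo-architectural events can't be masked off. */
  		if (event < NR_REAL_INTEL_ARCH_EVENTS &&
  		    (events_mask & BIT(event)))
  			break;
  	}

  	/* Prints 1: counter 2 is hidden along with unsupported counter 1. */
  	printf("advertised fixed counters: %d\n", i);
  	return 0;
  }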