Message-ID: <20231110021306.1269082-8-seanjc@google.com>
Date: Thu, 9 Nov 2023 18:12:47 -0800
From: Sean Christopherson <seanjc@...gle.com>
To: Sean Christopherson <seanjc@...gle.com>,
Paolo Bonzini <pbonzini@...hat.com>
Cc: kvm@...r.kernel.org, linux-kernel@...r.kernel.org,
Kan Liang <kan.liang@...ux.intel.com>,
Dapeng Mi <dapeng1.mi@...ux.intel.com>,
Jim Mattson <jmattson@...gle.com>,
Jinrong Liang <cloudliang@...cent.com>,
Aaron Lewis <aaronlewis@...gle.com>,
Like Xu <likexu@...cent.com>
Subject: [PATCH v8 07/26] KVM: x86/pmu: Apply "fast" RDPMC only to Intel PMUs
Move the handling of "fast" RDPMC instructions, which drop bits 63:32 of
the count, to Intel. The "fast" flag, and all flags for that matter, are
Intel-only and aren't supported by AMD.
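For illustration only (not part of the patch), a minimal sketch of the
"fast" semantics being moved, using the kernel's BIT()/GENMASK_ULL()
helpers; rdpmc_apply_fast() is a made-up name:

  #include <linux/bits.h>
  #include <linux/types.h>

  /* If bit 31 of the RDPMC index is set, return only bits 31:0. */
  static inline u64 rdpmc_apply_fast(u64 count, u32 idx)
  {
  	return (idx & BIT(31)) ? count & GENMASK_ULL(31, 0) : count;
  }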
Opportunistically replace open coded bit crud with proper #defines.
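As a reference for those #defines (variable names here are illustrative;
the bit positions come from the diff below and from perf's
INTEL_PMC_FIXED_RDPMC_BASE, which is bit 30):

  u32 raw = ecx;                                /* guest's ECX */
  bool fixed = raw & INTEL_RDPMC_FIXED;         /* bit 30: fixed counter */
  bool fast  = raw & INTEL_RDPMC_FAST;          /* bit 31: 32-bit read */
  u32 idx    = raw & ~(INTEL_RDPMC_FIXED | INTEL_RDPMC_FAST);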
Fixes: ca724305a2b0 ("KVM: x86/vPMU: Implement AMD vPMU code for KVM")
Signed-off-by: Sean Christopherson <seanjc@...gle.com>
---
arch/x86/kvm/pmu.c | 3 +--
arch/x86/kvm/vmx/pmu_intel.c | 20 ++++++++++++++++----
2 files changed, 17 insertions(+), 6 deletions(-)
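For context, a hedged guest-side sketch of issuing a "fast" RDPMC
(standard rdpmc inline asm reading EDX:EAX with the index in ECX;
rdpmc_fast() is a made-up name):

  static inline u64 rdpmc_fast(u32 counter)
  {
  	u32 lo, hi;

  	/* Setting bit 31 of ECX requests the "fast" 32-bit read on Intel. */
  	asm volatile("rdpmc" : "=a"(lo), "=d"(hi) : "c"(counter | (1u << 31)));
  	return ((u64)hi << 32) | lo;
  }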
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 99ed72966528..e3ba5e12c2e7 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -499,10 +499,9 @@ static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
- bool fast_mode = idx & (1u << 31);
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
struct kvm_pmc *pmc;
- u64 mask = fast_mode ? ~0u : ~0ull;
+ u64 mask = ~0ull;
if (!pmu->version)
return 1;
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 3bac3b32b485..c6ea128ea7c8 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -20,6 +20,10 @@
#include "nested.h"
#include "pmu.h"
+/* Perf's "BASE" is wildly misleading, this is a single-bit flag, not a base. */
+#define INTEL_RDPMC_FIXED INTEL_PMC_FIXED_RDPMC_BASE
+#define INTEL_RDPMC_FAST BIT(31)
+
#define MSR_PMC_FULL_WIDTH_BIT (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
@@ -55,12 +59,17 @@ static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
}
}
+static u32 intel_rdpmc_get_masked_idx(struct kvm_pmu *pmu, u32 idx)
+{
+ return idx & ~(INTEL_RDPMC_FIXED | INTEL_RDPMC_FAST);
+}
+
static bool intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
- bool fixed = idx & (1u << 30);
+ bool fixed = idx & INTEL_RDPMC_FIXED;
- idx &= ~(3u << 30);
+ idx = intel_rdpmc_get_masked_idx(pmu, idx);
return fixed ? idx < pmu->nr_arch_fixed_counters
: idx < pmu->nr_arch_gp_counters;
@@ -70,11 +79,14 @@ static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
unsigned int idx, u64 *mask)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
- bool fixed = idx & (1u << 30);
+ bool fixed = idx & INTEL_RDPMC_FIXED;
struct kvm_pmc *counters;
unsigned int num_counters;
- idx &= ~(3u << 30);
+ if (idx & INTEL_RDPMC_FAST)
+ *mask &= GENMASK_ULL(31, 0);
+
+ idx = intel_rdpmc_get_masked_idx(pmu, idx);
if (fixed) {
counters = pmu->fixed_counters;
num_counters = pmu->nr_arch_fixed_counters;
--
2.42.0.869.gea05f2083d-goog