[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20260121225438.3908422-4-jmattson@google.com>
Date: Wed, 21 Jan 2026 14:54:01 -0800
From: Jim Mattson <jmattson@...gle.com>
To: Sean Christopherson <seanjc@...gle.com>, Paolo Bonzini <pbonzini@...hat.com>,
Thomas Gleixner <tglx@...utronix.de>, Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>, x86@...nel.org,
"H. Peter Anvin" <hpa@...or.com>, Peter Zijlstra <peterz@...radead.org>,
Arnaldo Carvalho de Melo <acme@...nel.org>, Namhyung Kim <namhyung@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>, Jiri Olsa <jolsa@...nel.org>,
Ian Rogers <irogers@...gle.com>, Adrian Hunter <adrian.hunter@...el.com>,
James Clark <james.clark@...aro.org>, Shuah Khan <shuah@...nel.org>, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-perf-users@...r.kernel.org,
linux-kselftest@...r.kernel.org
Cc: Jim Mattson <jmattson@...gle.com>
Subject: [PATCH 3/6] KVM: x86/pmu: Track enabled AMD PMCs with Host-Only xor
Guest-Only bits set
Add pmc_hostonly and pmc_guestonly bitmaps to struct kvm_pmu to track which
guest-enabled performance counters have exactly one of the Host-Only and
Guest-Only event selector bits set. PMCs that are disabled, that have
neither HG_ONLY bit set, or that have both HG_ONLY bits set are not
tracked, because they don't require special handling at vCPU state
transitions.
Update the bitmaps when the guest writes to an event selector MSR.
Signed-off-by: Jim Mattson <jmattson@...gle.com>
---
arch/x86/include/asm/kvm_host.h | 4 ++++
arch/x86/kvm/pmu.c | 2 ++
arch/x86/kvm/svm/pmu.c | 28 ++++++++++++++++++++++++++++
3 files changed, 34 insertions(+)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ecd4019b84b7..92050f76f84b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -593,6 +593,10 @@ struct kvm_pmu {
DECLARE_BITMAP(pmc_counting_instructions, X86_PMC_IDX_MAX);
DECLARE_BITMAP(pmc_counting_branches, X86_PMC_IDX_MAX);
+ /* AMD only: track enabled PMCs with exactly one HG_ONLY bit set */
+ DECLARE_BITMAP(pmc_hostonly, X86_PMC_IDX_MAX);
+ DECLARE_BITMAP(pmc_guestonly, X86_PMC_IDX_MAX);
+
u64 ds_area;
u64 pebs_enable;
u64 pebs_enable_rsvd;
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index bd6b785cf261..833ee2ecd43f 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -921,6 +921,8 @@ static void kvm_pmu_reset(struct kvm_vcpu *vcpu)
pmu->need_cleanup = false;
bitmap_zero(pmu->reprogram_pmi, X86_PMC_IDX_MAX);
+ bitmap_zero(pmu->pmc_hostonly, X86_PMC_IDX_MAX);
+ bitmap_zero(pmu->pmc_guestonly, X86_PMC_IDX_MAX);
kvm_for_each_pmc(pmu, pmc, i, pmu->all_valid_pmc_idx) {
pmc_stop_counter(pmc);
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index f619417557f9..c06013e2b4b1 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -147,6 +147,33 @@ static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
}
+/*
+ * Refresh pmu->pmc_hostonly and pmu->pmc_guestonly for @pmc based on its
+ * current event selector.  A PMC appears in at most one of the two bitmaps:
+ * only when it is enabled AND has exactly one of the AMD Host-Only /
+ * Guest-Only event selector bits set.  Disabled PMCs, and PMCs with
+ * neither or both HG_ONLY bits set, are cleared from both bitmaps, as
+ * they need no special handling at vCPU state transitions.
+ */
+static void amd_pmu_update_hg_bitmaps(struct kvm_pmc *pmc)
+{
+ struct kvm_pmu *pmu = pmc_to_pmu(pmc);
+ u64 eventsel = pmc->eventsel;
+
+ /* A disabled counter is never tracked, regardless of its HG bits. */
+ if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE)) {
+ bitmap_clear(pmu->pmc_hostonly, pmc->idx, 1);
+ bitmap_clear(pmu->pmc_guestonly, pmc->idx, 1);
+ return;
+ }
+
+ /* Dispatch on the two-bit Host-Only/Guest-Only field of the selector. */
+ switch (eventsel & AMD64_EVENTSEL_HG_ONLY) {
+ case AMD64_EVENTSEL_HOSTONLY:
+ bitmap_set(pmu->pmc_hostonly, pmc->idx, 1);
+ bitmap_clear(pmu->pmc_guestonly, pmc->idx, 1);
+ break;
+ case AMD64_EVENTSEL_GUESTONLY:
+ bitmap_clear(pmu->pmc_hostonly, pmc->idx, 1);
+ bitmap_set(pmu->pmc_guestonly, pmc->idx, 1);
+ break;
+ default:
+ /* Neither or both bits set: no special vCPU-transition handling. */
+ bitmap_clear(pmu->pmc_hostonly, pmc->idx, 1);
+ bitmap_clear(pmu->pmc_guestonly, pmc->idx, 1);
+ break;
+ }
+}
+
static bool amd_pmu_dormant_hg_event(struct kvm_pmc *pmc)
{
u64 hg_only = pmc->eventsel & AMD64_EVENTSEL_HG_ONLY;
@@ -196,6 +223,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (data != pmc->eventsel) {
pmc->eventsel = data;
amd_pmu_set_eventsel_hw(pmc);
+ amd_pmu_update_hg_bitmaps(pmc);
kvm_pmu_request_counter_reprogram(pmc);
}
return 0;
--
2.52.0.457.g6b5491de43-goog
Powered by blists - more mailing lists