Message-ID: <20260121225438.3908422-5-jmattson@google.com>
Date: Wed, 21 Jan 2026 14:54:02 -0800
From: Jim Mattson <jmattson@...gle.com>
To: Sean Christopherson <seanjc@...gle.com>, Paolo Bonzini <pbonzini@...hat.com>,
Thomas Gleixner <tglx@...utronix.de>, Ingo Molnar <mingo@...hat.com>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>, x86@...nel.org,
"H. Peter Anvin" <hpa@...or.com>, Peter Zijlstra <peterz@...radead.org>,
Arnaldo Carvalho de Melo <acme@...nel.org>, Namhyung Kim <namhyung@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>, Jiri Olsa <jolsa@...nel.org>,
Ian Rogers <irogers@...gle.com>, Adrian Hunter <adrian.hunter@...el.com>,
James Clark <james.clark@...aro.org>, Shuah Khan <shuah@...nel.org>, kvm@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-perf-users@...r.kernel.org,
linux-kselftest@...r.kernel.org
Cc: Jim Mattson <jmattson@...gle.com>
Subject: [PATCH 4/6] KVM: x86/pmu: [De]activate HG_ONLY PMCs at SVME changes
and nested transitions

Add a new function, kvm_pmu_set_pmc_eventsel_hw_enable(), to set or clear
the enable bit in eventsel_hw for PMCs identified by a bitmap.

Use this function to update Host-Only and Guest-Only counters at the
following transitions:

- svm_set_efer(): When SVME changes, enable Guest-Only counters if SVME
  is being cleared (HG_ONLY bits become ignored), or disable them if SVME
  is being set (L1 is active).

- nested_svm_vmrun(): Disable Host-Only counters and enable Guest-Only
  counters.

- nested_svm_vmexit(): Disable Guest-Only counters and enable Host-Only
  counters.

Signed-off-by: Jim Mattson <jmattson@...gle.com>
---
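[Illustrative note, not part of the patch: the standalone userspace
sketch below models what the new hook does across the transitions
described above. The struct pmu/struct pmc types, NR_PMCS, and the
plain unsigned long bitmaps are simplified stand-ins for KVM's real
structures and for kvm_for_each_pmc(); only ARCH_PERFMON_EVENTSEL_ENABLE
(bit 22 of the event select) matches the architectural definition.

#include <stdbool.h>
#include <stdio.h>

#define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22)
#define NR_PMCS 6

struct pmc {
	unsigned long long eventsel_hw;
};

struct pmu {
	struct pmc pmcs[NR_PMCS];
	unsigned long pmc_hostonly;	/* bitmap of Host-Only counters */
	unsigned long pmc_guestonly;	/* bitmap of Guest-Only counters */
};

/*
 * Same shape as amd_pmu_set_pmc_eventsel_hw_enable(): walk the bitmap
 * and set or clear the enable bit in each selected counter's
 * eventsel_hw.
 */
static void set_eventsel_hw_enable(struct pmu *pmu, unsigned long bitmap,
				   bool enable)
{
	for (int i = 0; i < NR_PMCS; i++) {
		if (!(bitmap & (1UL << i)))
			continue;
		if (enable)
			pmu->pmcs[i].eventsel_hw |= ARCH_PERFMON_EVENTSEL_ENABLE;
		else
			pmu->pmcs[i].eventsel_hw &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
	}
}

static void dump(const struct pmu *pmu, const char *when)
{
	printf("%-8s", when);
	for (int i = 0; i < NR_PMCS; i++)
		printf(" PMC%d=%d", i,
		       !!(pmu->pmcs[i].eventsel_hw &
			  ARCH_PERFMON_EVENTSEL_ENABLE));
	printf("\n");
}

int main(void)
{
	/* Both counters programmed with the enable bit set by the guest. */
	struct pmu pmu = {
		.pmcs = {
			[0] = { .eventsel_hw = ARCH_PERFMON_EVENTSEL_ENABLE },
			[1] = { .eventsel_hw = ARCH_PERFMON_EVENTSEL_ENABLE },
		},
		.pmc_hostonly  = 1UL << 0,	/* PMC0 is Host-Only */
		.pmc_guestonly = 1UL << 1,	/* PMC1 is Guest-Only */
	};

	/* L1 sets EFER.SVME: Guest-Only counters must stop counting. */
	set_eventsel_hw_enable(&pmu, pmu.pmc_guestonly, false);
	dump(&pmu, "SVME=1:");

	/* nested_svm_vmrun(): Host-Only off, Guest-Only on. */
	set_eventsel_hw_enable(&pmu, pmu.pmc_hostonly, false);
	set_eventsel_hw_enable(&pmu, pmu.pmc_guestonly, true);
	dump(&pmu, "VMRUN:");

	/* nested_svm_vmexit(): Guest-Only off, Host-Only back on. */
	set_eventsel_hw_enable(&pmu, pmu.pmc_hostonly, true);
	set_eventsel_hw_enable(&pmu, pmu.pmc_guestonly, false);
	dump(&pmu, "#VMEXIT:");

	return 0;
}

Built with "cc -std=c99 sketch.c", this prints PMC0/PMC1 flipping
between the host and guest contexts, which is the invariant the three
call sites above are meant to maintain.]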
arch/x86/include/asm/kvm-x86-pmu-ops.h | 1 +
arch/x86/kvm/pmu.c | 7 +++++++
arch/x86/kvm/pmu.h | 4 ++++
arch/x86/kvm/svm/nested.c | 10 ++++++++++
arch/x86/kvm/svm/pmu.c | 17 +++++++++++++++++
arch/x86/kvm/svm/svm.c | 3 +++
6 files changed, 42 insertions(+)

diff --git a/arch/x86/include/asm/kvm-x86-pmu-ops.h b/arch/x86/include/asm/kvm-x86-pmu-ops.h
index f0aa6996811f..7b32796213a0 100644
--- a/arch/x86/include/asm/kvm-x86-pmu-ops.h
+++ b/arch/x86/include/asm/kvm-x86-pmu-ops.h
@@ -26,6 +26,7 @@ KVM_X86_PMU_OP_OPTIONAL(cleanup)
KVM_X86_PMU_OP_OPTIONAL(write_global_ctrl)
KVM_X86_PMU_OP(mediated_load)
KVM_X86_PMU_OP(mediated_put)
+KVM_X86_PMU_OP_OPTIONAL(set_pmc_eventsel_hw_enable)

#undef KVM_X86_PMU_OP
#undef KVM_X86_PMU_OP_OPTIONAL
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 833ee2ecd43f..1541c201285b 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -1142,6 +1142,13 @@ void kvm_pmu_branch_retired(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_pmu_branch_retired);

+void kvm_pmu_set_pmc_eventsel_hw_enable(struct kvm_vcpu *vcpu,
+ unsigned long *bitmap, bool enable)
+{
+ kvm_pmu_call(set_pmc_eventsel_hw_enable)(vcpu, bitmap, enable);
+}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_pmu_set_pmc_eventsel_hw_enable);
+
static bool is_masked_filter_valid(const struct kvm_x86_pmu_event_filter *filter)
{
u64 mask = kvm_pmu_ops.EVENTSEL_EVENT |
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 0925246731cb..b8be8b6e40d8 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -41,6 +41,8 @@ struct kvm_pmu_ops {
void (*mediated_load)(struct kvm_vcpu *vcpu);
void (*mediated_put)(struct kvm_vcpu *vcpu);
void (*write_global_ctrl)(u64 global_ctrl);
+ void (*set_pmc_eventsel_hw_enable)(struct kvm_vcpu *vcpu,
+ unsigned long *bitmap, bool enable);

const u64 EVENTSEL_EVENT;
const int MAX_NR_GP_COUNTERS;
@@ -258,6 +260,8 @@ void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
void kvm_pmu_instruction_retired(struct kvm_vcpu *vcpu);
void kvm_pmu_branch_retired(struct kvm_vcpu *vcpu);
+void kvm_pmu_set_pmc_eventsel_hw_enable(struct kvm_vcpu *vcpu,
+ unsigned long *bitmap, bool enable);

void kvm_mediated_pmu_load(struct kvm_vcpu *vcpu);
void kvm_mediated_pmu_put(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index de90b104a0dd..edaa76e38417 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -28,6 +28,7 @@
#include "smm.h"
#include "cpuid.h"
#include "lapic.h"
+#include "pmu.h"
#include "svm.h"
#include "hyperv.h"
@@ -1054,6 +1055,11 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
goto out_exit_err;

+ kvm_pmu_set_pmc_eventsel_hw_enable(vcpu,
+ vcpu_to_pmu(vcpu)->pmc_hostonly, false);
+ kvm_pmu_set_pmc_eventsel_hw_enable(vcpu,
+ vcpu_to_pmu(vcpu)->pmc_guestonly, true);
+
if (nested_svm_merge_msrpm(vcpu))
goto out;

@@ -1137,6 +1143,10 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
/* Exit Guest-Mode */
leave_guest_mode(vcpu);

+ kvm_pmu_set_pmc_eventsel_hw_enable(vcpu,
+ vcpu_to_pmu(vcpu)->pmc_hostonly, true);
+ kvm_pmu_set_pmc_eventsel_hw_enable(vcpu,
+ vcpu_to_pmu(vcpu)->pmc_guestonly, false);
svm->nested.vmcb12_gpa = 0;
WARN_ON_ONCE(svm->nested.nested_run_pending);

diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index c06013e2b4b1..85155d65fa38 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -316,6 +316,22 @@ static void amd_mediated_pmu_put(struct kvm_vcpu *vcpu)
wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, pmu->global_status);
}

+static void amd_pmu_set_pmc_eventsel_hw_enable(struct kvm_vcpu *vcpu,
+ unsigned long *bitmap,
+ bool enable)
+{
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+ struct kvm_pmc *pmc;
+ int i;
+
+ kvm_for_each_pmc(pmu, pmc, i, bitmap) {
+ if (enable)
+ pmc->eventsel_hw |= ARCH_PERFMON_EVENTSEL_ENABLE;
+ else
+ pmc->eventsel_hw &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
+ }
+}
+
struct kvm_pmu_ops amd_pmu_ops __initdata = {
.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
.msr_idx_to_pmc = amd_msr_idx_to_pmc,
@@ -329,6 +345,7 @@ struct kvm_pmu_ops amd_pmu_ops __initdata = {
.is_mediated_pmu_supported = amd_pmu_is_mediated_pmu_supported,
.mediated_load = amd_mediated_pmu_load,
.mediated_put = amd_mediated_pmu_put,
+ .set_pmc_eventsel_hw_enable = amd_pmu_set_pmc_eventsel_hw_enable,

.EVENTSEL_EVENT = AMD64_EVENTSEL_EVENT,
.MAX_NR_GP_COUNTERS = KVM_MAX_NR_AMD_GP_COUNTERS,
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 7803d2781144..953089b38921 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -244,6 +244,9 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
if (svm_gp_erratum_intercept && !sev_guest(vcpu->kvm))
set_exception_intercept(svm, GP_VECTOR);
}
+
+ kvm_pmu_set_pmc_eventsel_hw_enable(vcpu,
+ vcpu_to_pmu(vcpu)->pmc_guestonly, !(efer & EFER_SVME));
}

svm->vmcb->save.efer = efer | EFER_SVME;
--
2.52.0.457.g6b5491de43-goog