Message-ID: <20260129232835.3710773-4-jmattson@google.com>
Date: Thu, 29 Jan 2026 15:28:08 -0800
From: Jim Mattson <jmattson@...gle.com>
To: Peter Zijlstra <peterz@...radead.org>, Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>, Namhyung Kim <namhyung@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>, Jiri Olsa <jolsa@...nel.org>,
Ian Rogers <irogers@...gle.com>, Adrian Hunter <adrian.hunter@...el.com>,
James Clark <james.clark@...aro.org>, Thomas Gleixner <tglx@...nel.org>, Borislav Petkov <bp@...en8.de>,
Dave Hansen <dave.hansen@...ux.intel.com>, x86@...nel.org,
"H. Peter Anvin" <hpa@...or.com>, Sean Christopherson <seanjc@...gle.com>, Paolo Bonzini <pbonzini@...hat.com>,
Shuah Khan <shuah@...nel.org>, linux-perf-users@...r.kernel.org,
linux-kernel@...r.kernel.org, kvm@...r.kernel.org,
linux-kselftest@...r.kernel.org
Cc: mizhang@...gle.com, yosryahmed@...gle.com, sandipan.das@....com,
Jim Mattson <jmattson@...gle.com>
Subject: [PATCH v2 3/5] KVM: x86/pmu: Refresh Host-Only/Guest-Only eventsel at
nested transitions

Add amd_pmu_refresh_host_guest_eventsel_hw() to recalculate eventsel_hw
for all PMCs based on the current vCPU state. This is needed because the
hardware enable state of Host-Only and Guest-Only counters must be
re-evaluated at:

- SVME changes: when EFER.SVME is modified, counters with the Guest-Only
  bit set need their hardware enable state updated.

- Nested transitions: when entering guest mode, Host-Only counters must
  be disabled and Guest-Only counters enabled; when leaving guest mode,
  Host-Only counters must be enabled and Guest-Only counters disabled
  (see the sketch below).
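
To make the gating concrete, here is a sketch of the per-counter
predicate (illustration only: pmc_should_count() is a hypothetical
helper, the real logic lives in amd_pmu_set_eventsel_hw(), and the
EFER.SVME-dependent case above is omitted):

	/*
	 * Hypothetical helper, not part of this patch: should this
	 * counter's hardware ENABLE bit be set, given the guest's
	 * eventsel and the vCPU's current mode?
	 */
	static bool pmc_should_count(struct kvm_vcpu *vcpu, u64 eventsel)
	{
		if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE))
			return false;
		if ((eventsel & AMD64_EVENTSEL_GUESTONLY) && !is_guest_mode(vcpu))
			return false;
		if ((eventsel & AMD64_EVENTSEL_HOSTONLY) && is_guest_mode(vcpu))
			return false;
		return true;
	}
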
Introduce svm_enter_guest_mode() and svm_leave_guest_mode() wrappers that
call enter_guest_mode()/leave_guest_mode() followed by the PMU refresh,
ensuring the PMU state stays synchronized with guest mode transitions.
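
In both wrappers the refresh runs after the mode change, since the
recalculation is based on the vCPU's current state. In terms of the
hypothetical pmc_should_count() above, the invariant the wrappers are
meant to establish is roughly:

	/*
	 * Sketch only: after either wrapper returns, each GP counter's
	 * hardware ENABLE bit agrees with the new mode (assuming the
	 * Host/Guest bits are not reserved for this guest).
	 */
	svm_enter_guest_mode(vcpu);
	for (i = 0; i < pmu->nr_arch_gp_counters; i++)
		WARN_ON_ONCE(!!(pmu->gp_counters[i].eventsel_hw &
				ARCH_PERFMON_EVENTSEL_ENABLE) !=
			     pmc_should_count(vcpu, pmu->gp_counters[i].eventsel));
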
Signed-off-by: Jim Mattson <jmattson@...gle.com>
---
 arch/x86/kvm/svm/nested.c |  6 +++---
 arch/x86/kvm/svm/pmu.c    | 12 ++++++++++++
 arch/x86/kvm/svm/svm.c    |  2 ++
 arch/x86/kvm/svm/svm.h    | 17 +++++++++++++++++
 4 files changed, 34 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index de90b104a0dd..a7d1901f256b 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -757,7 +757,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
 	nested_svm_transition_tlb_flush(vcpu);
 
 	/* Enter Guest-Mode */
-	enter_guest_mode(vcpu);
+	svm_enter_guest_mode(vcpu);
 
 	/*
 	 * Filled at exit: exit_code, exit_info_1, exit_info_2, exit_int_info,
@@ -1136,7 +1136,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 	vmcb12 = map.hva;
 
 	/* Exit Guest-Mode */
-	leave_guest_mode(vcpu);
+	svm_leave_guest_mode(vcpu);
 	svm->nested.vmcb12_gpa = 0;
 	WARN_ON_ONCE(svm->nested.nested_run_pending);
 
@@ -1402,7 +1402,7 @@ void svm_leave_nested(struct kvm_vcpu *vcpu)
 		svm->nested.nested_run_pending = 0;
 		svm->nested.vmcb12_gpa = INVALID_GPA;
 
-		leave_guest_mode(vcpu);
+		svm_leave_guest_mode(vcpu);
 
 		svm_switch_vmcb(svm, &svm->vmcb01);
 
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 8d451110a94d..e2a849fc7daa 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -171,6 +171,18 @@ static void amd_pmu_set_eventsel_hw(struct kvm_pmc *pmc)
 		pmc->eventsel_hw &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
 }
 
+void amd_pmu_refresh_host_guest_eventsel_hw(struct kvm_vcpu *vcpu)
+{
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	int i;
+
+	if (pmu->reserved_bits & AMD64_EVENTSEL_HOST_GUEST_MASK)
+		return;
+
+	for (i = 0; i < pmu->nr_arch_gp_counters; i++)
+		amd_pmu_set_eventsel_hw(&pmu->gp_counters[i]);
+}
+
 static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 5f0136dbdde6..498e098a3df0 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -244,6 +244,8 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 			if (svm_gp_erratum_intercept && !sev_guest(vcpu->kvm))
 				set_exception_intercept(svm, GP_VECTOR);
 		}
+
+		amd_pmu_refresh_host_guest_eventsel_hw(vcpu);
 	}
 
 	svm->vmcb->save.efer = efer | EFER_SVME;
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index ebd7b36b1ceb..86d4d37bfb08 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -864,6 +864,23 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
 void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa);
 void sev_es_unmap_ghcb(struct vcpu_svm *svm);
 
+/* pmu.c */
+void amd_pmu_refresh_host_guest_eventsel_hw(struct kvm_vcpu *vcpu);
+
+
+static inline void svm_enter_guest_mode(struct kvm_vcpu *vcpu)
+{
+	enter_guest_mode(vcpu);
+	amd_pmu_refresh_host_guest_eventsel_hw(vcpu);
+}
+
+static inline void svm_leave_guest_mode(struct kvm_vcpu *vcpu)
+{
+	leave_guest_mode(vcpu);
+	amd_pmu_refresh_host_guest_eventsel_hw(vcpu);
+}
+
 #ifdef CONFIG_KVM_AMD_SEV
 int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp);
 int sev_mem_enc_register_region(struct kvm *kvm,

--
2.53.0.rc1.225.gd81095ad13-goog