Message-ID: <20250620221326.1261128-21-coltonlewis@google.com>
Date: Fri, 20 Jun 2025 22:13:20 +0000
From: Colton Lewis <coltonlewis@...gle.com>
To: kvm@...r.kernel.org
Cc: Paolo Bonzini <pbonzini@...hat.com>, Jonathan Corbet <corbet@....net>,
Russell King <linux@...linux.org.uk>, Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>, Marc Zyngier <maz@...nel.org>, Oliver Upton <oliver.upton@...ux.dev>,
Joey Gouly <joey.gouly@....com>, Suzuki K Poulose <suzuki.poulose@....com>,
Zenghui Yu <yuzenghui@...wei.com>, Mark Rutland <mark.rutland@....com>,
Shuah Khan <shuah@...nel.org>, linux-doc@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.linux.dev,
linux-perf-users@...r.kernel.org, linux-kselftest@...r.kernel.org,
Colton Lewis <coltonlewis@...gle.com>
Subject: [PATCH v2 19/23] KVM: arm64: Enforce PMU event filter at vcpu_load()
The KVM API for event filtering says that counters do not count when
blocked by the event filter. To enforce that, the event filter must be
rechecked on every vcpu_load(). If the event is filtered, exclude
counting at all exception levels before writing the value to hardware.
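For illustration, here is a minimal userspace sketch of the masking a
filtered event receives (simplified: only the EL0/EL1/EL2 filter bits are
shown, and the constants merely mirror the ARMV8_PMU_* bit definitions
rather than using the kernel headers):

	#include <stdint.h>
	#include <stdio.h>

	/* Values mirror the ARMV8_PMU_* bit definitions (sketch only). */
	#define EXCLUDE_EL1	(1u << 31)
	#define EXCLUDE_EL0	(1u << 30)
	#define INCLUDE_EL2	(1u << 27)

	int main(void)
	{
		/* Guest-programmed evtyper: count event 0x11 at EL0/EL1. */
		uint32_t val = 0x11;
		/* Event is blocked by the filter: set every exclude bit ... */
		uint32_t set = EXCLUDE_EL1 | EXCLUDE_EL0;
		/* ... and make sure EL2 is not included either. */
		uint32_t clr = INCLUDE_EL2;

		val |= set;
		val &= ~clr;

		/* With all excludes set and EL2 not included, the counter
		 * counts at no exception level, which is equivalent to the
		 * event never being counted at all.
		 */
		printf("evtyper written to hardware: 0x%08x\n", val);
		return 0;
	}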
Signed-off-by: Colton Lewis <coltonlewis@...gle.com>
---
arch/arm64/kvm/pmu-part.c | 43 +++++++++++++++++++++++++++++++++++++++
1 file changed, 43 insertions(+)
diff --git a/arch/arm64/kvm/pmu-part.c b/arch/arm64/kvm/pmu-part.c
index 19bd6e0da222..fd19a1dd7901 100644
--- a/arch/arm64/kvm/pmu-part.c
+++ b/arch/arm64/kvm/pmu-part.c
@@ -177,6 +177,47 @@ u8 kvm_pmu_hpmn(struct kvm_vcpu *vcpu)
return hpmn;
}
+/**
+ * kvm_pmu_apply_event_filter() - Apply the event filter to loaded counters
+ * @vcpu: Pointer to vcpu struct
+ *
+ * To uphold the guarantee of the KVM PMU event filter, ensure that no
+ * counter counts an event blocked by the filter. Accomplish this by
+ * excluding all exception levels for filtered events.
+ */
+static void kvm_pmu_apply_event_filter(struct kvm_vcpu *vcpu)
+{
+ struct arm_pmu *pmu = vcpu->kvm->arch.arm_pmu;
+ u64 evtyper_set = kvm_pmu_evtyper_mask(vcpu->kvm)
+ & ~kvm_pmu_event_mask(vcpu->kvm)
+ & ~ARMV8_PMU_INCLUDE_EL2;
+ u64 evtyper_clr = ARMV8_PMU_INCLUDE_EL2;
+ u8 i;
+ u64 val;
+
+ for (i = 0; i < pmu->hpmn_max; i++) {
+ val = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
+
+ if (vcpu->kvm->arch.pmu_filter &&
+ !test_bit(val, vcpu->kvm->arch.pmu_filter)) {
+ val |= evtyper_set;
+ val &= ~evtyper_clr;
+ }
+
+ write_pmevtypern(i, val);
+ }
+
+ val = __vcpu_sys_reg(vcpu, PMCCFILTR_EL0);
+
+ if (vcpu->kvm->arch.pmu_filter &&
+ !test_bit(ARMV8_PMUV3_PERFCTR_CPU_CYCLES, vcpu->kvm->arch.pmu_filter)) {
+ val |= evtyper_set;
+ val &= ~evtyper_clr;
+ }
+
+ write_pmccfiltr(val);
+}
+
/**
* kvm_pmu_load() - Load untrapped PMU registers
* @vcpu: Pointer to struct kvm_vcpu
@@ -199,6 +240,8 @@ void kvm_pmu_load(struct kvm_vcpu *vcpu)
if (!kvm_pmu_is_partitioned(pmu) || (vcpu->arch.mdcr_el2 & MDCR_EL2_TPM))
return;
+ kvm_pmu_apply_event_filter(vcpu);
+
for (i = 0; i < pmu->hpmn_max; i++) {
val = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i);
write_pmevcntrn(i, val);
--
2.50.0.714.g196bf9f422-goog