Message-Id: <20191204204426.9628-4-eric.auger@redhat.com>
Date: Wed, 4 Dec 2019 21:44:26 +0100
From: Eric Auger <eric.auger@...hat.com>
To: eric.auger.pro@...il.com, eric.auger@...hat.com, maz@...nel.org,
linux-kernel@...r.kernel.org, kvmarm@...ts.cs.columbia.edu
Cc: james.morse@....com, andrew.murray@....com, suzuki.poulose@....com,
drjones@...hat.com
Subject: [RFC 3/3] KVM: arm64: pmu: Enforce PMEVTYPER evtCount size

ARMv8.1-PMU supports a 16-bit evtCount field, whereas ARMv8.0-PMU
only supports 10 bits.

On Seattle, which has an ARMv8.0 PMU implementation, evtCount[15:10]
is not read as 0, as would be expected. Fix this by applying a mask
to the selected event that depends on the PMU version.

Also remove a redundant __vcpu_sys_reg() assignment, which is already
done in kvm_pmu_set_counter_event_type().

Signed-off-by: Eric Auger <eric.auger@...hat.com>
---
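Not part of the patch, just an illustration for reviewers: a minimal
user-space sketch (assumed helper names, not kernel code) of the
version-dependent masking this series introduces. The mask values and
PMUVer constants mirror the definitions added below; sanitize_evtyper()
is a hypothetical stand-in for the masking done in access_pmu_evtyper().

#include <stdint.h>
#include <stdio.h>

/* Writable bits with a 10-bit evtCount (ARMv8.0-PMU) */
#define ARMV8_PMU_EVTYPE_MASK    0xc80003ffULL
/* Writable bits with a 16-bit evtCount (ARMv8.1-PMU) */
#define ARMV8_1_PMU_EVTYPE_MASK  0xc800ffffULL
/* ID_AA64DFR0_EL1.PMUVer field values, as in the sysreg.h hunk */
#define PMUVER_8_0  0x1
#define PMUVER_8_1  0x4

/* Clamp a guest-written PMEVTYPER value to the evtCount width
 * implied by the PMU version. */
static uint64_t sanitize_evtyper(uint64_t val, unsigned int pmuver)
{
        uint64_t mask = (pmuver == PMUVER_8_0) ?
                        ARMV8_PMU_EVTYPE_MASK : ARMV8_1_PMU_EVTYPE_MASK;
        return val & mask;
}

int main(void)
{
        /* evtCount[15:10] written by a guest is dropped on v8.0 ... */
        printf("v8.0: 0x%llx\n",
               (unsigned long long)sanitize_evtyper(0xffff, PMUVER_8_0));
        /* ... but preserved on v8.1. */
        printf("v8.1: 0x%llx\n",
               (unsigned long long)sanitize_evtyper(0xffff, PMUVER_8_1));
        return 0;
}
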
 arch/arm64/include/asm/perf_event.h |  5 ++++-
 arch/arm64/include/asm/sysreg.h     |  5 +++++
 arch/arm64/kernel/perf_event.c      |  2 +-
 arch/arm64/kvm/sys_regs.c           | 14 ++++++++++----
 4 files changed, 20 insertions(+), 6 deletions(-)

diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 2bdbc79bbd01..37ad1d654d2a 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -189,7 +189,10 @@
 /*
  * PMXEVTYPER: Event selection reg
  */
-#define ARMV8_PMU_EVTYPE_MASK   0xc800ffff  /* Mask for writable bits */
+/* Mask for writable bits featuring 10b evtCount (ARMv8.0-PMU) */
+#define ARMV8_PMU_EVTYPE_MASK   0xc80003ff
+/* Mask for writable bits featuring 16b evtCount (ARMv8.1-PMU) */
+#define ARMV8_1_PMU_EVTYPE_MASK 0xc800ffff
 #define ARMV8_PMU_EVTYPE_EVENT  0xffff      /* Mask for EVENT bits */
 
 /*
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 6e919fafb43d..e01b3e3acdf6 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -672,6 +672,11 @@
 #define ID_AA64DFR0_TRACEVER_SHIFT      4
 #define ID_AA64DFR0_DEBUGVER_SHIFT      0
 
+#define ID_AA64DFR0_PMUVER_NOT_IMPL     0x0
+#define ID_AA64DFR0_PMUVER_8_0          0x1
+#define ID_AA64DFR0_PMUVER_8_1          0x4
+#define ID_AA64DFR0_PMUVER_IMPL_DEF     0xF
+
 #define ID_ISAR5_RDM_SHIFT              24
 #define ID_ISAR5_CRC32_SHIFT            16
 #define ID_ISAR5_SHA2_SHIFT             12
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index e40b65645c86..d5fe56190ad3 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -425,7 +425,7 @@ static void armv8pmu_write_counter(struct perf_event *event, u64 value)
 static inline void armv8pmu_write_evtype(int idx, u32 val)
 {
         armv8pmu_select_counter(idx);
-        val &= ARMV8_PMU_EVTYPE_MASK;
+        val &= ARMV8_1_PMU_EVTYPE_MASK;
         write_sysreg(val, pmxevtyper_el0);
 }
 
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 46822afc57e0..8deb6485d605 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -815,11 +815,17 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
 static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                                const struct sys_reg_desc *r)
 {
-        u64 idx, reg;
+        unsigned int pmuver;
+        u64 idx, reg, dfr0, evtype_mask;
 
         if (!kvm_arm_pmu_v3_ready(vcpu))
                 return trap_raz_wi(vcpu, p, r);
 
+        dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
+        pmuver = cpuid_feature_extract_unsigned_field(dfr0,
+                                                      ID_AA64DFR0_PMUVER_SHIFT);
+        evtype_mask = (pmuver == ID_AA64DFR0_PMUVER_8_0) ?
+                        ARMV8_PMU_EVTYPE_MASK : ARMV8_1_PMU_EVTYPE_MASK;
         if (pmu_access_el0_disabled(vcpu))
                 return false;
 
@@ -842,11 +848,11 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                 return false;
 
         if (p->is_write) {
-                kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
-                __vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
+                kvm_pmu_set_counter_event_type(vcpu,
+                                               p->regval & evtype_mask, idx);
                 kvm_vcpu_pmu_restore_guest(vcpu);
         } else {
-                p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
+                p->regval = __vcpu_sys_reg(vcpu, reg) & evtype_mask;
         }
 
         return true;
--
2.20.1