Message-Id: <20240116060129.55473-3-shahuang@redhat.com>
Date: Tue, 16 Jan 2024 01:01:20 -0500
From: Shaoqin Huang <shahuang@...hat.com>
To: Oliver Upton <oliver.upton@...ux.dev>,
Marc Zyngier <maz@...nel.org>,
kvmarm@...ts.linux.dev
Cc: Eric Auger <eauger@...hat.com>,
Shaoqin Huang <shahuang@...hat.com>,
Eric Auger <eric.auger@...hat.com>,
James Morse <james.morse@....com>,
Suzuki K Poulose <suzuki.poulose@....com>,
Zenghui Yu <yuzenghui@...wei.com>,
Paolo Bonzini <pbonzini@...hat.com>,
Shuah Khan <shuah@...nel.org>,
linux-arm-kernel@...ts.infradead.org,
kvm@...r.kernel.org,
linux-kselftest@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v3 2/5] KVM: selftests: aarch64: Move pmu helper functions into vpmu.h
Move these PMU helper functions into include/aarch64/vpmu.h so that
they can be reused by other PMU tests.
No functional change intended.
Reviewed-by: Eric Auger <eric.auger@...hat.com>
Signed-off-by: Shaoqin Huang <shahuang@...hat.com>
---
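[ Usage sketch, not part of this patch: a rough illustration of how a new
  PMU selftest's guest code might drive the counters through the shared
  helpers once they live in vpmu.h. The include paths, the guest_code()
  scaffolding and the use of ARMV8_PMUV3_PERFCTR_CPU_CYCLES / GUEST_DONE()
  below are illustrative assumptions, not code from this series. ]

#include <kvm_util.h>
#include <processor.h>
#include <vpmu.h>	/* assumed include path for the shared helpers */

static void guest_code(void)
{
	/* Stop and reset all counters before programming them. */
	pmu_disable_reset();

	/* Program event counter 0 to count CPU cycles (assumed event define). */
	write_pmevtypern(0, ARMV8_PMUV3_PERFCTR_CPU_CYCLES);
	enable_counter(0);

	/* Globally enable the PMU (PMCR_EL0.E). */
	write_sysreg(read_sysreg(pmcr_el0) | ARMV8_PMU_PMCR_E, pmcr_el0);
	isb();

	/* ... run the workload to be measured ... */

	disable_counter(0);
	GUEST_ASSERT(read_pmevcntrn(0) > 0);
	GUEST_DONE();
}

Because the helpers are static inline in the header, each test that
includes vpmu.h gets its own copy and no extra object file or Makefile
change is needed.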
.../kvm/aarch64/vpmu_counter_access.c | 118 -----------------
.../selftests/kvm/include/aarch64/vpmu.h | 119 ++++++++++++++++++
2 files changed, 119 insertions(+), 118 deletions(-)
diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
index 17305408a334..62d6315790ab 100644
--- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
+++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
@@ -20,12 +20,6 @@
#include <perf/arm_pmuv3.h>
#include <linux/bitfield.h>
-/* The max number of the PMU event counters (excluding the cycle counter) */
-#define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1)
-
-/* The cycle counter bit position that's common among the PMU registers */
-#define ARMV8_PMU_CYCLE_IDX 31
-
static struct vpmu_vm *vpmu_vm;
struct pmreg_sets {
@@ -35,118 +29,6 @@ struct pmreg_sets {
#define PMREG_SET(set, clr) {.set_reg_id = set, .clr_reg_id = clr}
-static uint64_t get_pmcr_n(uint64_t pmcr)
-{
- return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
-}
-
-static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
-{
- *pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
- *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
-}
-
-static uint64_t get_counters_mask(uint64_t n)
-{
- uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
-
- if (n)
- mask |= GENMASK(n - 1, 0);
- return mask;
-}
-
-/* Read PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
-static inline unsigned long read_sel_evcntr(int sel)
-{
- write_sysreg(sel, pmselr_el0);
- isb();
- return read_sysreg(pmxevcntr_el0);
-}
-
-/* Write PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
-static inline void write_sel_evcntr(int sel, unsigned long val)
-{
- write_sysreg(sel, pmselr_el0);
- isb();
- write_sysreg(val, pmxevcntr_el0);
- isb();
-}
-
-/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
-static inline unsigned long read_sel_evtyper(int sel)
-{
- write_sysreg(sel, pmselr_el0);
- isb();
- return read_sysreg(pmxevtyper_el0);
-}
-
-/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
-static inline void write_sel_evtyper(int sel, unsigned long val)
-{
- write_sysreg(sel, pmselr_el0);
- isb();
- write_sysreg(val, pmxevtyper_el0);
- isb();
-}
-
-static inline void enable_counter(int idx)
-{
- uint64_t v = read_sysreg(pmcntenset_el0);
-
- write_sysreg(BIT(idx) | v, pmcntenset_el0);
- isb();
-}
-
-static inline void disable_counter(int idx)
-{
- uint64_t v = read_sysreg(pmcntenset_el0);
-
- write_sysreg(BIT(idx) | v, pmcntenclr_el0);
- isb();
-}
-
-static void pmu_disable_reset(void)
-{
- uint64_t pmcr = read_sysreg(pmcr_el0);
-
- /* Reset all counters, disabling them */
- pmcr &= ~ARMV8_PMU_PMCR_E;
- write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0);
- isb();
-}
-
-#define RETURN_READ_PMEVCNTRN(n) \
- return read_sysreg(pmevcntr##n##_el0)
-static unsigned long read_pmevcntrn(int n)
-{
- PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
- return 0;
-}
-
-#define WRITE_PMEVCNTRN(n) \
- write_sysreg(val, pmevcntr##n##_el0)
-static void write_pmevcntrn(int n, unsigned long val)
-{
- PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
- isb();
-}
-
-#define READ_PMEVTYPERN(n) \
- return read_sysreg(pmevtyper##n##_el0)
-static unsigned long read_pmevtypern(int n)
-{
- PMEVN_SWITCH(n, READ_PMEVTYPERN);
- return 0;
-}
-
-#define WRITE_PMEVTYPERN(n) \
- write_sysreg(val, pmevtyper##n##_el0)
-static void write_pmevtypern(int n, unsigned long val)
-{
- PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
- isb();
-}
-
/*
* The pmc_accessor structure has pointers to PMEV{CNTR,TYPER}<n>_EL0
* accessors that test cases will use. Each of the accessors will
diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
index 0a56183644ee..e0cc1ca1c4b7 100644
--- a/tools/testing/selftests/kvm/include/aarch64/vpmu.h
+++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
@@ -1,10 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <kvm_util.h>
+#include <perf/arm_pmuv3.h>
#define GICD_BASE_GPA 0x8000000ULL
#define GICR_BASE_GPA 0x80A0000ULL
+/* The max number of the PMU event counters (excluding the cycle counter) */
+#define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1)
+
+/* The cycle counter bit position that's common among the PMU registers */
+#define ARMV8_PMU_CYCLE_IDX 31
+
struct vpmu_vm {
struct kvm_vm *vm;
struct kvm_vcpu *vcpu;
@@ -14,3 +21,115 @@ struct vpmu_vm {
struct vpmu_vm *create_vpmu_vm(void *guest_code);
void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm);
+
+static inline uint64_t get_pmcr_n(uint64_t pmcr)
+{
+ return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
+}
+
+static inline void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
+{
+ *pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
+ *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
+}
+
+static inline uint64_t get_counters_mask(uint64_t n)
+{
+ uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
+
+ if (n)
+ mask |= GENMASK(n - 1, 0);
+ return mask;
+}
+
+/* Read PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
+static inline unsigned long read_sel_evcntr(int sel)
+{
+ write_sysreg(sel, pmselr_el0);
+ isb();
+ return read_sysreg(pmxevcntr_el0);
+}
+
+/* Write PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
+static inline void write_sel_evcntr(int sel, unsigned long val)
+{
+ write_sysreg(sel, pmselr_el0);
+ isb();
+ write_sysreg(val, pmxevcntr_el0);
+ isb();
+}
+
+/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
+static inline unsigned long read_sel_evtyper(int sel)
+{
+ write_sysreg(sel, pmselr_el0);
+ isb();
+ return read_sysreg(pmxevtyper_el0);
+}
+
+/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
+static inline void write_sel_evtyper(int sel, unsigned long val)
+{
+ write_sysreg(sel, pmselr_el0);
+ isb();
+ write_sysreg(val, pmxevtyper_el0);
+ isb();
+}
+
+static inline void enable_counter(int idx)
+{
+ uint64_t v = read_sysreg(pmcntenset_el0);
+
+ write_sysreg(BIT(idx) | v, pmcntenset_el0);
+ isb();
+}
+
+static inline void disable_counter(int idx)
+{
+ uint64_t v = read_sysreg(pmcntenset_el0);
+
+ write_sysreg(BIT(idx) | v, pmcntenclr_el0);
+ isb();
+}
+
+static inline void pmu_disable_reset(void)
+{
+ uint64_t pmcr = read_sysreg(pmcr_el0);
+
+ /* Reset all counters, disabling them */
+ pmcr &= ~ARMV8_PMU_PMCR_E;
+ write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0);
+ isb();
+}
+
+#define RETURN_READ_PMEVCNTRN(n) \
+ return read_sysreg(pmevcntr##n##_el0)
+static inline unsigned long read_pmevcntrn(int n)
+{
+ PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
+ return 0;
+}
+
+#define WRITE_PMEVCNTRN(n) \
+ write_sysreg(val, pmevcntr##n##_el0)
+static inline void write_pmevcntrn(int n, unsigned long val)
+{
+ PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
+ isb();
+}
+
+#define READ_PMEVTYPERN(n) \
+ return read_sysreg(pmevtyper##n##_el0)
+static inline unsigned long read_pmevtypern(int n)
+{
+ PMEVN_SWITCH(n, READ_PMEVTYPERN);
+ return 0;
+}
+
+#define WRITE_PMEVTYPERN(n) \
+ write_sysreg(val, pmevtyper##n##_el0)
+static inline void write_pmevtypern(int n, unsigned long val)
+{
+ PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
+ isb();
+}
--
2.40.1