Message-Id: <20241112103717.589952-10-james.clark@linaro.org>
Date: Tue, 12 Nov 2024 10:37:08 +0000
From: James Clark <james.clark@...aro.org>
To: suzuki.poulose@....com,
oliver.upton@...ux.dev,
coresight@...ts.linaro.org,
kvmarm@...ts.linux.dev
Cc: James Clark <james.clark@...aro.org>,
Marc Zyngier <maz@...nel.org>,
Joey Gouly <joey.gouly@....com>,
Zenghui Yu <yuzenghui@...wei.com>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>,
Mike Leach <mike.leach@...aro.org>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Mark Rutland <mark.rutland@....com>,
Anshuman Khandual <anshuman.khandual@....com>,
James Morse <james.morse@....com>,
Shiqi Liu <shiqiliu@...t.edu.cn>,
Fuad Tabba <tabba@...gle.com>,
Mark Brown <broonie@...nel.org>,
Raghavendra Rao Ananta <rananta@...gle.com>,
linux-arm-kernel@...ts.infradead.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v7 09/12] KVM: arm64: coresight: Give TRBE enabled state to KVM
Currently with nVHE, KVM has to check whether TRBE is enabled on every
guest switch, even if TRBE was never used. Because it's a debug feature
and is more likely to be unused than used, give KVM the TRBE buffer
enabled status so that the hyp can take a much simpler and faster
do-nothing path.

The TRBLIMITR_EL1 writes that update this status are always done with
preemption disabled, except on the probe/hotplug path
(trbe_reset_local()), which is now wrapped with preempt_disable().
Signed-off-by: James Clark <james.clark@...aro.org>
---
arch/arm64/include/asm/kvm_host.h | 4 ++++
arch/arm64/kvm/debug.c | 16 ++++++++++++++++
drivers/hwtracing/coresight/coresight-trbe.c | 15 ++++++++++++---
3 files changed, 32 insertions(+), 3 deletions(-)
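
Note (illustration only, not part of this patch): a rough sketch of how
the nVHE hyp switch path can use the new flag for the do-nothing fast
path described above. The function name and the host_data_test_flag()
accessor below are assumed for illustration, not taken from this
series:

  /*
   * Illustrative sketch: with HOST_STATE_TRBE_EN maintained by
   * kvm_set_trblimitr(), the hyp can test the per-CPU host_data flag
   * instead of reading TRBLIMITR_EL1 on every guest switch.
   */
  static void __trbe_switch_to_guest_sketch(void)
  {
          /* Fast path: the host never enabled TRBE on this CPU */
          if (!host_data_test_flag(HOST_STATE_TRBE_EN))
                  return;

          /* Slow path: drain and disable the host TRBE buffer here */
  }

Because the flag lives in per-CPU host data, kvm_set_trblimitr() has to
run on the CPU whose TRBE state it records, which is why the
probe/hotplug path in trbe_reset_local() is now wrapped with
preempt_disable()/preempt_enable().
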
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 7f1e32d40f0c..b1dccac996a6 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -945,6 +945,8 @@ struct kvm_vcpu_arch {
#define HOST_FEAT_HAS_TRF __kvm_single_flag(feats, BIT(2))
/* PMBLIMITR_EL1_E is set (SPE profiling buffer enabled) */
#define HOST_STATE_SPE_EN __kvm_single_flag(state, BIT(0))
+/* TRBLIMITR_EL1_E is set (TRBE trace buffer enabled) */
+#define HOST_STATE_TRBE_EN __kvm_single_flag(state, BIT(1))
/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) + \
@@ -1387,6 +1389,7 @@ void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u64 clr);
bool kvm_set_pmuserenr(u64 val);
void kvm_set_pmblimitr(u64 pmblimitr);
+void kvm_set_trblimitr(u64 trblimitr);
#else
static inline void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u64 clr) {}
@@ -1395,6 +1398,7 @@ static inline bool kvm_set_pmuserenr(u64 val)
return false;
}
static inline void kvm_set_pmblimitr(u64 pmblimitr) {}
+static inline void kvm_set_trblimitr(u64 trblimitr) {}
#endif
void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index ed3b4d057c52..e99df2c3f62a 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -364,3 +364,19 @@ void kvm_set_pmblimitr(u64 pmblimitr)
host_data_clear_flag(HOST_STATE_SPE_EN);
}
EXPORT_SYMBOL_GPL(kvm_set_pmblimitr);
+
+void kvm_set_trblimitr(u64 trblimitr)
+{
+ /* Only read in nVHE */
+ if (has_vhe())
+ return;
+
+ if (kvm_arm_skip_trace_state())
+ return;
+
+ if (trblimitr & TRBLIMITR_EL1_E)
+ host_data_set_flag(HOST_STATE_TRBE_EN);
+ else
+ host_data_clear_flag(HOST_STATE_TRBE_EN);
+}
+EXPORT_SYMBOL_GPL(kvm_set_trblimitr);
diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c
index 96a32b213669..ff281b445682 100644
--- a/drivers/hwtracing/coresight/coresight-trbe.c
+++ b/drivers/hwtracing/coresight/coresight-trbe.c
@@ -18,6 +18,7 @@
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <linux/vmalloc.h>
+#include <linux/kvm_host.h>
#include "coresight-self-hosted-trace.h"
#include "coresight-trbe.h"
@@ -213,6 +214,12 @@ static inline void trbe_drain_buffer(void)
dsb(nsh);
}
+static void trbe_write_trblimitr(u64 val)
+{
+ write_sysreg_s(val, SYS_TRBLIMITR_EL1);
+ kvm_set_trblimitr(val);
+}
+
static inline void set_trbe_enabled(struct trbe_cpudata *cpudata, u64 trblimitr)
{
/*
@@ -220,7 +227,7 @@ static inline void set_trbe_enabled(struct trbe_cpudata *cpudata, u64 trblimitr)
* might be required for fetching the buffer limits.
*/
trblimitr |= TRBLIMITR_EL1_E;
- write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
+ trbe_write_trblimitr(trblimitr);
/* Synchronize the TRBE enable event */
isb();
@@ -238,7 +245,7 @@ static inline void set_trbe_disabled(struct trbe_cpudata *cpudata)
* might be required for fetching the buffer limits.
*/
trblimitr &= ~TRBLIMITR_EL1_E;
- write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
+ trbe_write_trblimitr(trblimitr);
if (trbe_needs_drain_after_disable(cpudata))
trbe_drain_buffer();
@@ -253,8 +260,10 @@ static void trbe_drain_and_disable_local(struct trbe_cpudata *cpudata)
static void trbe_reset_local(struct trbe_cpudata *cpudata)
{
+ preempt_disable();
trbe_drain_and_disable_local(cpudata);
- write_sysreg_s(0, SYS_TRBLIMITR_EL1);
+ trbe_write_trblimitr(0);
+ preempt_enable();
write_sysreg_s(0, SYS_TRBPTR_EL1);
write_sysreg_s(0, SYS_TRBBASER_EL1);
write_sysreg_s(0, SYS_TRBSR_EL1);
--
2.34.1