Message-Id: <20221107062514.2851047-6-anshuman.khandual@arm.com>
Date: Mon, 7 Nov 2022 11:55:12 +0530
From: Anshuman Khandual <anshuman.khandual@....com>
To: linux-kernel@...r.kernel.org, linux-perf-users@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org, peterz@...radead.org,
acme@...nel.org, mark.rutland@....com, will@...nel.org,
catalin.marinas@....com
Cc: Anshuman Khandual <anshuman.khandual@....com>,
Mark Brown <broonie@...nel.org>,
James Clark <james.clark@....com>,
Rob Herring <robh@...nel.org>, Marc Zyngier <maz@...nel.org>,
Suzuki Poulose <suzuki.poulose@....com>,
Ingo Molnar <mingo@...hat.com>
Subject: [PATCH V5 5/7] arm64/perf: Drive BRBE from perf event states

Branch stack sampling rides along with a normal perf event, and all branch
records get captured during the PMU overflow interrupt. This changes perf
event handling on the arm64 platform to accommodate the BRBE operations
required to enable branch stack sampling support.
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Arnaldo Carvalho de Melo <acme@...nel.org>
Cc: Mark Rutland <mark.rutland@....com>
Cc: Will Deacon <will@...nel.org>
Cc: Catalin Marinas <catalin.marinas@....com>
Cc: linux-perf-users@...r.kernel.org
Cc: linux-kernel@...r.kernel.org
Cc: linux-arm-kernel@...ts.infradead.org
Signed-off-by: Anshuman Khandual <anshuman.khandual@....com>
---
 arch/arm64/kernel/perf_event.c |  7 +++++++
 drivers/perf/arm_pmu.c         | 40 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 47 insertions(+)
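
As background (not part of the patch): branch stack sampling is requested
through the standard perf_event_open() ABI, and the BRBE path below only
runs for events that set PERF_SAMPLE_BRANCH_STACK. A minimal userspace
sketch of such an event (illustrative only; error handling and ring-buffer
consumption elided):

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/*
 * Open a cycles event that also requests branch records. On arm64
 * with BRBE, the records are captured in the PMU overflow interrupt
 * path modified by this patch.
 */
static int open_branch_sampling_event(void)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.sample_period = 100000;
        attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
        attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY |
                                  PERF_SAMPLE_BRANCH_USER;

        /* measure the calling task on any CPU */
        return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}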

diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index c97377e28288..97db333d1208 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -874,6 +874,13 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
                 if (!armpmu_event_set_period(event))
                         continue;
 
+                if (has_branch_stack(event)) {
+                        cpu_pmu->brbe_read(cpuc, event);
+                        data.br_stack = &cpuc->branches->brbe_stack;
+                        data.sample_flags |= PERF_SAMPLE_BRANCH_STACK;
+                        cpu_pmu->brbe_reset(cpuc);
+                }
+
                 /*
                  * Perf event overflow will queue the processing of the event as
                  * an irq_work which will be taken care of in the handling of
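
For reference, data.br_stack above points at the generic branch stack layout
that the perf core copies out to userspace with the sample. A simplified view
(fields abbreviated; the authoritative definitions live in
include/uapi/linux/perf_event.h and include/linux/perf_event.h):

#include <linux/types.h>

/* Simplified: each captured branch record */
struct perf_branch_entry {
        __u64 from;             /* branch source address */
        __u64 to;               /* branch target address */
        __u64 mispred:1;        /* branch was mispredicted */
        __u64 predicted:1;      /* branch was predicted correctly */
        __u64 reserved:62;      /* further flag bits elided here */
};

/* Simplified: what data.br_stack points at */
struct perf_branch_stack {
        __u64 nr;                               /* number of valid entries */
        __u64 hw_idx;                           /* hardware record index, if any */
        struct perf_branch_entry entries[];     /* nr records follow */
};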

diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 5048a500441e..1a8dca4e513e 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -271,12 +271,22 @@ armpmu_stop(struct perf_event *event, int flags)
 {
         struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
         struct hw_perf_event *hwc = &event->hw;
+        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
 
         /*
          * ARM pmu always has to update the counter, so ignore
          * PERF_EF_UPDATE, see comments in armpmu_start().
          */
         if (!(hwc->state & PERF_HES_STOPPED)) {
+                if (has_branch_stack(event)) {
+                        WARN_ON_ONCE(!hw_events->brbe_users);
+                        hw_events->brbe_users--;
+                        if (!hw_events->brbe_users) {
+                                hw_events->brbe_context = NULL;
+                                armpmu->brbe_disable(hw_events);
+                        }
+                }
+
                 armpmu->disable(event);
                 armpmu_event_update(event);
                 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
@@ -287,6 +297,7 @@ static void armpmu_start(struct perf_event *event, int flags)
 {
         struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
         struct hw_perf_event *hwc = &event->hw;
+        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
 
         /*
          * ARM pmu always has to reprogram the period, so ignore
@@ -304,6 +315,14 @@ static void armpmu_start(struct perf_event *event, int flags)
          * happened since disabling.
          */
         armpmu_event_set_period(event);
+        if (has_branch_stack(event)) {
+                if (event->ctx->task && hw_events->brbe_context != event->ctx) {
+                        armpmu->brbe_reset(hw_events);
+                        hw_events->brbe_context = event->ctx;
+                }
+                armpmu->brbe_enable(hw_events);
+                hw_events->brbe_users++;
+        }
         armpmu->enable(event);
 }
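
For readers of this patch in isolation: the brbe_* callbacks and bookkeeping
fields used above are introduced by earlier patches in this series. Their
shape, inferred purely from the call sites here rather than copied from those
patches, is roughly:

/*
 * Sketch inferred from the call sites in this patch; see the earlier
 * patches in this series for the real definitions.
 */
struct pmu_hw_events {
        /* ... existing members ... */
        int brbe_users;                          /* live branch-stack events on this CPU */
        struct perf_event_context *brbe_context; /* context owning the current records */
        struct branch_records *branches;         /* buffer holding the captured brbe_stack */
};

struct arm_pmu {
        /* ... existing members ... */
        void (*brbe_filter)(struct pmu_hw_events *hw_events, struct perf_event *event);
        void (*brbe_enable)(struct pmu_hw_events *hw_events);
        void (*brbe_disable)(struct pmu_hw_events *hw_events);
        void (*brbe_read)(struct pmu_hw_events *hw_events, struct perf_event *event);
        void (*brbe_reset)(struct pmu_hw_events *hw_events);
};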
@@ -349,6 +368,10 @@ armpmu_add(struct perf_event *event, int flags)
         hw_events->events[idx] = event;
         hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
+        if (has_branch_stack(event))
+                armpmu->brbe_filter(hw_events, event);
+
         if (flags & PERF_EF_START)
                 armpmu_start(event, PERF_EF_RELOAD);
@@ -443,6 +466,7 @@ __hw_perf_event_init(struct perf_event *event)
 {
         struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
         struct hw_perf_event *hwc = &event->hw;
+        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
         int mapping;
 
         hwc->flags = 0;
@@ -492,6 +516,9 @@ __hw_perf_event_init(struct perf_event *event)
                 local64_set(&hwc->period_left, hwc->sample_period);
         }
 
+        if (has_branch_stack(event))
+                armpmu->brbe_filter(hw_events, event);
+
         return validate_group(event);
 }
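
brbe_filter() itself is implemented later in the series. Purely as an
illustration of what such a hook works from, here is a hypothetical
translation of the UAPI branch_sample_type bits into a driver-local filter
word (the helper name and filter bits are invented, not from this series):

#include <linux/bits.h>
#include <linux/perf_event.h>

/*
 * Hypothetical illustration only: the real brbe_filter() programs
 * BRBE filtering from these same attr bits.
 */
static u64 brbe_filter_sketch(struct perf_event *event)
{
        u64 branch_type = event->attr.branch_sample_type;
        u64 filter = 0;

        if (branch_type & PERF_SAMPLE_BRANCH_ANY)
                filter |= BIT(0);       /* invented bit: all branch types */
        if (branch_type & PERF_SAMPLE_BRANCH_ANY_CALL)
                filter |= BIT(1);       /* invented bit: calls */
        if (branch_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
                filter |= BIT(2);       /* invented bit: returns */
        if (branch_type & PERF_SAMPLE_BRANCH_COND)
                filter |= BIT(3);       /* invented bit: conditional branches */

        return filter;
}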
@@ -520,6 +547,18 @@ static int armpmu_event_init(struct perf_event *event)
         return __hw_perf_event_init(event);
 }
 
+static void armpmu_sched_task(struct perf_event_context *ctx, bool sched_in)
+{
+        struct arm_pmu *armpmu = to_arm_pmu(ctx->pmu);
+        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
+
+        if (!hw_events->brbe_users)
+                return;
+
+        if (sched_in)
+                armpmu->brbe_reset(hw_events);
+}
+
 static void armpmu_enable(struct pmu *pmu)
 {
         struct arm_pmu *armpmu = to_arm_pmu(pmu);
@@ -877,6 +916,7 @@ static struct arm_pmu *__armpmu_alloc(gfp_t flags)
         }
 
         pmu->pmu = (struct pmu) {
+                .sched_task     = armpmu_sched_task,
                 .pmu_enable     = armpmu_enable,
                 .pmu_disable    = armpmu_disable,
                 .event_init     = armpmu_event_init,
--
2.25.1