Message-Id: <20230517022410.722287-11-anshuman.khandual@arm.com>
Date: Wed, 17 May 2023 07:54:10 +0530
From: Anshuman Khandual <anshuman.khandual@....com>
To: linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
will@...nel.org, catalin.marinas@....com, mark.rutland@....com
Cc: Anshuman Khandual <anshuman.khandual@....com>,
Mark Brown <broonie@...nel.org>,
James Clark <james.clark@....com>,
Rob Herring <robh@...nel.org>, Marc Zyngier <maz@...nel.org>,
Suzuki Poulose <suzuki.poulose@....com>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
linux-perf-users@...r.kernel.org
Subject: [PATCH V10 10/10] arm64/perf: Implement branch records save on PMU IRQ
This modifies armv8pmu_branch_read() to concatenate the live branch records
with the entries stored in the task context, and then process the resulting
buffer to create the perf branch entry array for perf_sample_data. It follows
the same principle as the task sched out path.
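
For reference, the stitching idea is roughly the following: the records
captured live at the PMU IRQ are placed in front of the records that were
saved at task sched out, and the combined list is clamped to the hardware
depth. Below is a minimal standalone sketch of that step, not the kernel
code itself; the struct layout, the MAX_ENTRIES constant and the stitch()
helper are illustrative assumptions only.

static int stitch(struct regset *store, struct regset *live,
		  int nr_store, int nr_live, int nr_max)
{
	/*
	 * Newest records come first in each buffer, so the freshly
	 * captured live records go in front and the previously stored
	 * records follow, truncated so the total never exceeds nr_max.
	 * nr_live is assumed to be <= nr_max.
	 */
	int nr_keep = nr_max - nr_live;

	if (nr_store > nr_keep)
		nr_store = nr_keep;

	/* Shift the stored records down to make room for the live ones. */
	memmove(&store[nr_live], &store[0], nr_store * sizeof(*store));
	memcpy(&store[0], &live[0], nr_live * sizeof(*live));
	return nr_store + nr_live;
}

A caller would then walk store[0..n) to fill the perf branch entries, with n
being the value returned above, much like process_branch_entries() does with
task_ctx->store in the patch below.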
Cc: Catalin Marinas <catalin.marinas@....com>
Cc: Will Deacon <will@...nel.org>
Cc: Mark Rutland <mark.rutland@....com>
Cc: linux-arm-kernel@...ts.infradead.org
Cc: linux-kernel@...r.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khandual@....com>
---
drivers/perf/arm_brbe.c | 75 +++++++++++++++--------------------------
1 file changed, 28 insertions(+), 47 deletions(-)
diff --git a/drivers/perf/arm_brbe.c b/drivers/perf/arm_brbe.c
index 0678ebf0a896..9e441141a2c3 100644
--- a/drivers/perf/arm_brbe.c
+++ b/drivers/perf/arm_brbe.c
@@ -693,41 +693,45 @@ void armv8pmu_branch_reset(void)
isb();
}
-static bool capture_branch_entry(struct pmu_hw_events *cpuc,
- struct perf_event *event, int idx)
+static void brbe_regset_branch_entries(struct pmu_hw_events *cpuc, struct perf_event *event,
+ struct brbe_regset *regset, int idx)
{
struct perf_branch_entry *entry = &cpuc->branches->branch_entries[idx];
- u64 brbinf = get_brbinf_reg(idx);
-
- /*
- * There are no valid entries anymore on the buffer.
- * Abort the branch record processing to save some
- * cycles and also reduce the capture/process load
- * for the user space as well.
- */
- if (brbe_invalid(brbinf))
- return false;
+ u64 brbinf = regset[idx].brbinf;
perf_clear_branch_entry_bitfields(entry);
if (brbe_record_is_complete(brbinf)) {
- entry->from = get_brbsrc_reg(idx);
- entry->to = get_brbtgt_reg(idx);
+ entry->from = regset[idx].brbsrc;
+ entry->to = regset[idx].brbtgt;
} else if (brbe_record_is_source_only(brbinf)) {
- entry->from = get_brbsrc_reg(idx);
+ entry->from = regset[idx].brbsrc;
entry->to = 0;
} else if (brbe_record_is_target_only(brbinf)) {
entry->from = 0;
- entry->to = get_brbtgt_reg(idx);
+ entry->to = regset[idx].brbtgt;
}
capture_brbe_flags(entry, event, brbinf);
- return true;
+}
+
+static void process_branch_entries(struct pmu_hw_events *cpuc, struct perf_event *event,
+ struct brbe_regset *regset, int nr_regset)
+{
+ int idx;
+
+ for (idx = 0; idx < nr_regset; idx++)
+ brbe_regset_branch_entries(cpuc, event, regset, idx);
+
+ cpuc->branches->branch_stack.nr = nr_regset;
+ cpuc->branches->branch_stack.hw_idx = -1ULL;
}
void armv8pmu_branch_read(struct pmu_hw_events *cpuc, struct perf_event *event)
{
struct brbe_hw_attr *brbe_attr = (struct brbe_hw_attr *)cpuc->percpu_pmu->private;
+ struct arm64_perf_task_context *task_ctx = event->pmu_ctx->task_ctx_data;
+ struct brbe_regset live[BRBE_MAX_ENTRIES];
+ int nr_live, nr_store;
u64 brbfcr, brbcr;
- int idx, loop1_idx1, loop1_idx2, loop2_idx1, loop2_idx2, count;
brbcr = read_sysreg_s(SYS_BRBCR_EL1);
brbfcr = read_sysreg_s(SYS_BRBFCR_EL1);
@@ -739,36 +743,13 @@ void armv8pmu_branch_read(struct pmu_hw_events *cpuc, struct perf_event *event)
write_sysreg_s(brbfcr | BRBFCR_EL1_PAUSED, SYS_BRBFCR_EL1);
isb();
- /* Determine the indices for each loop */
- loop1_idx1 = BRBE_BANK0_IDX_MIN;
- if (brbe_attr->brbe_nr <= BRBE_BANK_MAX_ENTRIES) {
- loop1_idx2 = brbe_attr->brbe_nr - 1;
- loop2_idx1 = BRBE_BANK1_IDX_MIN;
- loop2_idx2 = BRBE_BANK0_IDX_MAX;
- } else {
- loop1_idx2 = BRBE_BANK0_IDX_MAX;
- loop2_idx1 = BRBE_BANK1_IDX_MIN;
- loop2_idx2 = brbe_attr->brbe_nr - 1;
- }
-
- /* Loop through bank 0 */
- select_brbe_bank(BRBE_BANK_IDX_0);
- for (idx = 0, count = loop1_idx1; count <= loop1_idx2; idx++, count++) {
- if (!capture_branch_entry(cpuc, event, idx))
- goto skip_bank_1;
- }
-
- /* Loop through bank 1 */
- select_brbe_bank(BRBE_BANK_IDX_1);
- for (count = loop2_idx1; count <= loop2_idx2; idx++, count++) {
- if (!capture_branch_entry(cpuc, event, idx))
- break;
- }
-
-skip_bank_1:
- cpuc->branches->branch_stack.nr = idx;
- cpuc->branches->branch_stack.hw_idx = -1ULL;
+ nr_live = capture_brbe_regset(brbe_attr, live);
+ nr_store = task_ctx->nr_brbe_records;
+ nr_store = stitch_stored_live_entries(task_ctx->store, live, nr_store,
+ nr_live, brbe_attr->brbe_nr);
+ process_branch_entries(cpuc, event, task_ctx->store, nr_store);
process_branch_aborts(cpuc);
+ task_ctx->nr_brbe_records = 0;
/* Unpause the buffer */
write_sysreg_s(brbfcr & ~BRBFCR_EL1_PAUSED, SYS_BRBFCR_EL1);
--
2.25.1