Message-ID: <159420191062.4006.3064418714677249662.tip-bot2@tip-bot2>
Date: Wed, 08 Jul 2020 09:51:50 -0000
From: "tip-bot2 for Kan Liang" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Kan Liang <kan.liang@...ux.intel.com>,
"Peter Zijlstra (Intel)" <peterz@...radead.org>,
x86 <x86@...nel.org>, LKML <linux-kernel@...r.kernel.org>
Subject: [tip: perf/core] perf/x86/intel/lbr: Unify the stored format of LBR
information
The following commit has been merged into the perf/core branch of tip:
Commit-ID: 5624986dc61b81a77fb6136bc232593483d1c254
Gitweb: https://git.kernel.org/tip/5624986dc61b81a77fb6136bc232593483d1c254
Author: Kan Liang <kan.liang@...ux.intel.com>
AuthorDate: Fri, 03 Jul 2020 05:49:16 -07:00
Committer: Peter Zijlstra <peterz@...radead.org>
CommitterDate: Wed, 08 Jul 2020 11:38:53 +02:00
perf/x86/intel/lbr: Unify the stored format of LBR information
The LBR information in struct x86_perf_task_context is currently stored
in a different format from the PEBS LBR record and Architecture LBR,
which prevents sharing common code.

Use the format of the PEBS LBR record as the unified format, and use the
generic name lbr_entry to replace pebs_lbr_entry.
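
For quick reference, a minimal sketch of the resulting layout, condensed
from the hunks below (not a verbatim header excerpt; kernel types such as
u64 and the MAX_LBR_ENTRIES constant come from the surrounding headers):

	/* unified record, formerly pebs_lbr_entry */
	struct lbr_entry {
		u64	from, to, info;
	};

	/* per-task save area now stores one lbr_entry per LBR instead of
	 * three parallel u64 arrays */
	struct x86_perf_task_context {
		u64	lbr_sel;
		int	tos;
		int	valid_lbrs;
		struct x86_perf_task_context_opt opt;
		struct lbr_entry lbr[MAX_LBR_ENTRIES];
	};

With every consumer indexing the same three-word record, the PEBS drain
path, the context-switch save/restore path and Architecture LBR can
operate on a common format.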
Signed-off-by: Kan Liang <kan.liang@...ux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Link: https://lkml.kernel.org/r/1593780569-62993-11-git-send-email-kan.liang@linux.intel.com
---
arch/x86/events/intel/ds.c | 6 +++---
arch/x86/events/intel/lbr.c | 20 ++++++++++----------
arch/x86/events/perf_event.h | 6 ++----
arch/x86/include/asm/perf_event.h | 6 +-----
4 files changed, 16 insertions(+), 22 deletions(-)
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index dc43cc1..86848c5 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -954,7 +954,7 @@ static void adaptive_pebs_record_size_update(void)
if (pebs_data_cfg & PEBS_DATACFG_XMMS)
sz += sizeof(struct pebs_xmm);
if (pebs_data_cfg & PEBS_DATACFG_LBRS)
- sz += x86_pmu.lbr_nr * sizeof(struct pebs_lbr_entry);
+ sz += x86_pmu.lbr_nr * sizeof(struct lbr_entry);
cpuc->pebs_record_size = sz;
}
@@ -1595,10 +1595,10 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
}
if (format_size & PEBS_DATACFG_LBRS) {
- struct pebs_lbr *lbr = next_record;
+ struct lbr_entry *lbr = next_record;
int num_lbr = ((format_size >> PEBS_DATACFG_LBR_SHIFT)
& 0xff) + 1;
- next_record = next_record + num_lbr*sizeof(struct pebs_lbr_entry);
+ next_record = next_record + num_lbr * sizeof(struct lbr_entry);
if (has_branch_stack(event)) {
intel_pmu_store_pebs_lbrs(lbr);
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 7742562..b8baaf1 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -372,11 +372,11 @@ void intel_pmu_lbr_restore(void *ctx)
mask = x86_pmu.lbr_nr - 1;
for (i = 0; i < task_ctx->valid_lbrs; i++) {
lbr_idx = (tos - i) & mask;
- wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
- wrlbr_to (lbr_idx, task_ctx->lbr_to[i]);
+ wrlbr_from(lbr_idx, task_ctx->lbr[i].from);
+ wrlbr_to(lbr_idx, task_ctx->lbr[i].to);
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
- wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
+ wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr[i].info);
}
for (; i < x86_pmu.lbr_nr; i++) {
@@ -440,10 +440,10 @@ void intel_pmu_lbr_save(void *ctx)
from = rdlbr_from(lbr_idx);
if (!from)
break;
- task_ctx->lbr_from[i] = from;
- task_ctx->lbr_to[i] = rdlbr_to(lbr_idx);
+ task_ctx->lbr[i].from = from;
+ task_ctx->lbr[i].to = rdlbr_to(lbr_idx);
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
- rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
+ rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr[i].info);
}
task_ctx->valid_lbrs = i;
task_ctx->tos = tos;
@@ -1179,7 +1179,7 @@ intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
}
}
-void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr)
+void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int i;
@@ -1193,11 +1193,11 @@ void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr)
cpuc->lbr_stack.hw_idx = intel_pmu_lbr_tos();
for (i = 0; i < x86_pmu.lbr_nr; i++) {
- u64 info = lbr->lbr[i].info;
+ u64 info = lbr[i].info;
struct perf_branch_entry *e = &cpuc->lbr_entries[i];
- e->from = lbr->lbr[i].from;
- e->to = lbr->lbr[i].to;
+ e->from = lbr[i].from;
+ e->to = lbr[i].to;
e->mispred = !!(info & LBR_INFO_MISPRED);
e->predicted = !(info & LBR_INFO_MISPRED);
e->in_tx = !!(info & LBR_INFO_IN_TX);
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index ba89e56..aaa426d 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -765,13 +765,11 @@ struct x86_perf_task_context_opt {
};
struct x86_perf_task_context {
- u64 lbr_from[MAX_LBR_ENTRIES];
- u64 lbr_to[MAX_LBR_ENTRIES];
- u64 lbr_info[MAX_LBR_ENTRIES];
u64 lbr_sel;
int tos;
int valid_lbrs;
struct x86_perf_task_context_opt opt;
+ struct lbr_entry lbr[MAX_LBR_ENTRIES];
};
#define x86_add_quirk(func_) \
@@ -1092,7 +1090,7 @@ void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);
void intel_pmu_auto_reload_read(struct perf_event *event);
-void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr);
+void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr);
void intel_ds_init(void);
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 9ffce7d..2e29558 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -282,14 +282,10 @@ struct pebs_xmm {
u64 xmm[16*2]; /* two entries for each register */
};
-struct pebs_lbr_entry {
+struct lbr_entry {
u64 from, to, info;
};
-struct pebs_lbr {
- struct pebs_lbr_entry lbr[0]; /* Variable length */
-};
-
/*
* IBS cpuid feature detection
*/