[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1466533874-52003-5-git-send-email-davidcc@google.com>
Date: Tue, 21 Jun 2016 11:31:13 -0700
From: David Carrillo-Cisneros <davidcc@...gle.com>
To: linux-kernel@...r.kernel.org
Cc: "x86@...nel.org" <x86@...nel.org>, Ingo Molnar <mingo@...hat.com>,
Andi Kleen <ak@...ux.intel.com>,
Kan Liang <kan.liang@...el.com>,
Peter Zijlstra <peterz@...radead.org>,
David Carrillo-Cisneros <davidcc@...gle.com>
Subject: [PATCH v02 4/5] perf/x86/intel: MSR_LAST_BRANCH_FROM_x quirk for ctx switch
Add quirk for context switch to save/restore the value of
MSR_LAST_BRANCH_FROM_x when LBR is enabled and there is potential for
kernel addresses to be in the lbr_from register.
To test this patch, use a perf tool and kernel with the next patch
in this series. That patch removes the workaround that masked
the hardware bug:
$ ./lbr_perf record --call-graph lbr -e cycles:k sleep 1
where lbr_perf is the patched perf tool, which allows specifying :k
in LBR mode. The above command will trigger a #GP fault:
[ 411.191445] ------------[ cut here ]------------
[ 411.196015] WARNING: CPU: 28 PID: 14096 at arch/x86/mm/extable.c:65 ex_handler_wrmsr_unsafe+0x70/0x80
[ 411.205123] unchecked MSR access error: WRMSR to 0x681 (tried to write 0x1fffffff81010794)
...
[ 411.265962] Call Trace:
[ 411.268384] [<ffffffff8167af49>] dump_stack+0x4d/0x63
[ 411.273462] [<ffffffff810b9b15>] __warn+0xe5/0x100
[ 411.278278] [<ffffffff810b9be9>] warn_slowpath_fmt+0x49/0x50
[ 411.283955] [<ffffffff810abb40>] ex_handler_wrmsr_unsafe+0x70/0x80
[ 411.290144] [<ffffffff810abc42>] fixup_exception+0x42/0x50
[ 411.295658] [<ffffffff81079d1a>] do_general_protection+0x8a/0x160
[ 411.301764] [<ffffffff81684ec2>] general_protection+0x22/0x30
[ 411.307527] [<ffffffff810101b9>] ? intel_pmu_lbr_sched_task+0xc9/0x380
[ 411.314063] [<ffffffff81009d7c>] intel_pmu_sched_task+0x3c/0x60
[ 411.319996] [<ffffffff81003a2b>] x86_pmu_sched_task+0x1b/0x20
[ 411.325762] [<ffffffff81192a5b>] perf_pmu_sched_task+0x6b/0xb0
[ 411.331610] [<ffffffff8119746d>] __perf_event_task_sched_in+0x7d/0x150
[ 411.338145] [<ffffffff810dd9dc>] finish_task_switch+0x15c/0x200
[ 411.344078] [<ffffffff8167f894>] __schedule+0x274/0x6cc
[ 411.349325] [<ffffffff8167fdd9>] schedule+0x39/0x90
[ 411.354229] [<ffffffff81675398>] exit_to_usermode_loop+0x39/0x89
[ 411.360246] [<ffffffff810028ce>] prepare_exit_to_usermode+0x2e/0x30
[ 411.366524] [<ffffffff81683c1b>] retint_user+0x8/0x10
[ 411.371599] ---[ end trace 1ed61b8a551e95d3 ]---
Signed-off-by: David Carrillo-Cisneros <davidcc@...gle.com>
Reviewed-by: Stephane Eranian <eranian@...gle.com>
---
arch/x86/events/intel/lbr.c | 25 ++++++++++++++++++++++---
1 file changed, 22 insertions(+), 3 deletions(-)
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 2ee5dde..6cd7cc0 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -281,6 +281,21 @@ inline u64 lbr_from_signext_quirk_wr(u64 val)
return val;
}
+/*
+ * If quirk is needed, ensure sign extension is 61 bits.
+ */
+
+u64 lbr_from_signext_quirk_rd(u64 val)
+{
+ if (static_branch_unlikely(&lbr_from_quirk_key))
+ /*
+ * Quirk is on when TSX is not enabled. Therefore TSX
+ * flags must be read as OFF.
+ */
+ val &= ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
+ return val;
+}
+
static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
{
int i;
@@ -297,7 +312,8 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
tos = task_ctx->tos;
for (i = 0; i < tos; i++) {
lbr_idx = (tos - i) & mask;
- wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
+ wrmsrl(x86_pmu.lbr_from + lbr_idx,
+ lbr_from_signext_quirk_wr(task_ctx->lbr_from[i]));
wrmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
@@ -310,7 +326,7 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
{
int i;
unsigned lbr_idx, mask;
- u64 tos;
+ u64 tos, val;
if (task_ctx->lbr_callstack_users == 0) {
task_ctx->lbr_stack_state = LBR_NONE;
@@ -321,7 +337,8 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
tos = intel_pmu_lbr_tos();
for (i = 0; i < tos; i++) {
lbr_idx = (tos - i) & mask;
- rdmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
+ rdmsrl(x86_pmu.lbr_from + lbr_idx, val);
+ task_ctx->lbr_from[i] = lbr_from_signext_quirk_rd(val);
rdmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
@@ -499,6 +516,8 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
int lbr_flags = lbr_desc[lbr_format];
rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
+ from = lbr_from_signext_quirk_rd(from);
+
rdmsrl(x86_pmu.lbr_to + lbr_idx, to);
if (lbr_format == LBR_FORMAT_INFO && need_info) {
--
2.8.0.rc3.226.g39d4020
Powered by blists - more mailing lists