Message-Id: <20180422135304.872016120@linuxfoundation.org>
Date: Sun, 22 Apr 2018 15:52:42 +0200
From: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
To: linux-kernel@...r.kernel.org
Cc: Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
stable@...r.kernel.org, Adrian Hunter <adrian.hunter@...el.com>,
Jiri Olsa <jolsa@...hat.com>,
Arnaldo Carvalho de Melo <acme@...hat.com>
Subject: [PATCH 4.4 04/97] perf intel-pt: Fix sync_switch

4.4-stable review patch. If anyone has any objections, please let me know.
------------------
From: Adrian Hunter <adrian.hunter@...el.com>
commit 63d8e38f6ae6c36dd5b5ba0e8c112e8861532ea2 upstream.
sync_switch is a facility to synchronize decoding more closely with the
point in the kernel when the context actually switched.
The flag indicating that sync_switch is enabled was global to the decoding,
whereas it is really specific to the CPU.
The trace data for different CPUs is put on different queues, so add
sync_switch to the intel_pt_queue structure and use that in preference
to the global setting in the intel_pt structure.
That fixes problems decoding one CPU's trace that arose because sync_switch
had been disabled due to a different CPU's queue.
Signed-off-by: Adrian Hunter <adrian.hunter@...el.com>
Cc: Jiri Olsa <jolsa@...hat.com>
Cc: stable@...r.kernel.org
Link: http://lkml.kernel.org/r/1520431349-30689-3-git-send-email-adrian.hunter@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@...hat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@...uxfoundation.org>
---
tools/perf/util/intel-pt.c | 32 +++++++++++++++++++++++++-------
1 file changed, 25 insertions(+), 7 deletions(-)
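
To illustrate the data-layout change described above, here is a minimal
standalone sketch (simplified stand-in structs, not the real struct intel_pt
/ struct intel_pt_queue from tools/perf): the flag now lives in each per-CPU
queue and enabling it walks all queues, so clearing it on one queue no longer
switches off sync_switch for every other CPU's decoding.

/*
 * Minimal sketch of the per-queue sync_switch idea.  The struct and
 * function names below are simplified stand-ins, not the perf code.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_QUEUES 2

struct pt_queue {                /* stand-in for struct intel_pt_queue */
	int cpu;
	bool sync_switch;        /* now per-queue, as in the patch */
};

struct pt {                      /* stand-in for struct intel_pt */
	bool sync_switch;        /* global setting kept as the default */
	struct pt_queue queue[NR_QUEUES];
};

/* Mirrors intel_pt_enable_sync_switch(): set the global flag and
 * propagate it to every per-CPU queue. */
static void enable_sync_switch(struct pt *pt)
{
	unsigned int i;

	pt->sync_switch = true;
	for (i = 0; i < NR_QUEUES; i++)
		pt->queue[i].sync_switch = true;
}

int main(void)
{
	struct pt pt = { .queue = { { .cpu = 0 }, { .cpu = 1 } } };

	enable_sync_switch(&pt);

	/* A decode error on CPU 1 now drops sync_switch only for that
	 * queue; CPU 0 keeps synchronizing with context switches. */
	pt.queue[1].sync_switch = false;

	printf("cpu0 sync_switch=%d cpu1 sync_switch=%d\n",
	       pt.queue[0].sync_switch, pt.queue[1].sync_switch);
	return 0;
}
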
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -125,6 +125,7 @@ struct intel_pt_queue {
bool stop;
bool step_through_buffers;
bool use_buffer_pid_tid;
+ bool sync_switch;
pid_t pid, tid;
int cpu;
int switch_state;
@@ -852,10 +853,12 @@ static int intel_pt_setup_queue(struct i
if (pt->timeless_decoding || !pt->have_sched_switch)
ptq->use_buffer_pid_tid = true;
}
+
+ ptq->sync_switch = pt->sync_switch;
}
if (!ptq->on_heap &&
- (!pt->sync_switch ||
+ (!ptq->sync_switch ||
ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
const struct intel_pt_state *state;
int ret;
@@ -1238,7 +1241,7 @@ static int intel_pt_sample(struct intel_
if (pt->synth_opts.last_branch)
intel_pt_update_last_branch_rb(ptq);
- if (!pt->sync_switch)
+ if (!ptq->sync_switch)
return 0;
if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
@@ -1319,6 +1322,21 @@ static u64 intel_pt_switch_ip(struct int
return switch_ip;
}
+static void intel_pt_enable_sync_switch(struct intel_pt *pt)
+{
+ unsigned int i;
+
+ pt->sync_switch = true;
+
+ for (i = 0; i < pt->queues.nr_queues; i++) {
+ struct auxtrace_queue *queue = &pt->queues.queue_array[i];
+ struct intel_pt_queue *ptq = queue->priv;
+
+ if (ptq)
+ ptq->sync_switch = true;
+ }
+}
+
static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
{
const struct intel_pt_state *state = ptq->state;
@@ -1335,7 +1353,7 @@ static int intel_pt_run_decoder(struct i
if (pt->switch_ip) {
intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
pt->switch_ip, pt->ptss_ip);
- pt->sync_switch = true;
+ intel_pt_enable_sync_switch(pt);
}
}
}
@@ -1351,9 +1369,9 @@ static int intel_pt_run_decoder(struct i
if (state->err) {
if (state->err == INTEL_PT_ERR_NODATA)
return 1;
- if (pt->sync_switch &&
+ if (ptq->sync_switch &&
state->from_ip >= pt->kernel_start) {
- pt->sync_switch = false;
+ ptq->sync_switch = false;
intel_pt_next_tid(pt, ptq);
}
if (pt->synth_opts.errors) {
@@ -1379,7 +1397,7 @@ static int intel_pt_run_decoder(struct i
state->timestamp, state->est_timestamp);
ptq->timestamp = state->est_timestamp;
/* Use estimated TSC in unknown switch state */
- } else if (pt->sync_switch &&
+ } else if (ptq->sync_switch &&
ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
intel_pt_is_switch_ip(ptq, state->to_ip) &&
ptq->next_tid == -1) {
@@ -1526,7 +1544,7 @@ static int intel_pt_sync_switch(struct i
return 1;
ptq = intel_pt_cpu_to_ptq(pt, cpu);
- if (!ptq)
+ if (!ptq || !ptq->sync_switch)
return 1;
switch (ptq->switch_state) {