[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20200220052701.7754-4-leo.yan@linaro.org>
Date: Thu, 20 Feb 2020 13:26:55 +0800
From: Leo Yan <leo.yan@...aro.org>
To: Arnaldo Carvalho de Melo <acme@...nel.org>,
Mathieu Poirier <mathieu.poirier@...aro.org>,
Suzuki K Poulose <suzuki.poulose@....com>,
Mark Rutland <mark.rutland@....com>,
Mike Leach <mike.leach@...aro.org>,
Robert Walker <robert.walker@....com>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...hat.com>,
Namhyung Kim <namhyung@...nel.org>,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
Coresight ML <coresight@...ts.linaro.org>
Cc: Leo Yan <leo.yan@...aro.org>
Subject: [PATCH v5 3/9] perf cs-etm: Refactor instruction size handling
cs-etm.c has several functions which need to know the instruction size
based on an address; e.g. cs_etm__instr_addr() and cs_etm__copy_insn()
both calculate the instruction size separately, with duplicated code.
Furthermore, new features added later might need to calculate the
instruction size as well.
For this reason, this patch refactors the code to introduce a new
function, cs_etm__instr_size(); this function is the central place to
calculate the instruction size based on the ISA type and the
instruction address.
Given that the trace data can be megabytes in size and will most likely
be A64/A32 on many current and future platforms, cs_etm__instr_addr()
keeps a single ISA type check for the non-T32 case; for that case it
executes an optimized calculation (addr + offset * 4).
Signed-off-by: Leo Yan <leo.yan@...aro.org>
---
tools/perf/util/cs-etm.c | 52 ++++++++++++++++++++++++----------------
1 file changed, 32 insertions(+), 20 deletions(-)
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 7cf30b5e0e20..f3ba2cfb634f 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -935,6 +935,26 @@ static inline int cs_etm__t32_instr_size(struct cs_etm_queue *etmq,
return ((instrBytes[1] & 0xF8) >= 0xE8) ? 4 : 2;
}
+static inline int cs_etm__instr_size(struct cs_etm_queue *etmq,
+ u8 trace_chan_id,
+ enum cs_etm_isa isa,
+ u64 addr)
+{
+ int insn_len;
+
+ /*
+ * T32 instruction size might be 32-bit or 16-bit, decide by calling
+ * cs_etm__t32_instr_size().
+ */
+ if (isa == CS_ETM_ISA_T32)
+ insn_len = cs_etm__t32_instr_size(etmq, trace_chan_id, addr);
+ /* Otherwise, A64 and A32 instruction size are always 32-bit. */
+ else
+ insn_len = 4;
+
+ return insn_len;
+}
+
static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet)
{
/* Returns 0 for the CS_ETM_DISCONTINUITY packet */
@@ -959,19 +979,19 @@ static inline u64 cs_etm__instr_addr(struct cs_etm_queue *etmq,
const struct cs_etm_packet *packet,
u64 offset)
{
- if (packet->isa == CS_ETM_ISA_T32) {
- u64 addr = packet->start_addr;
+ u64 addr = packet->start_addr;
- while (offset) {
- addr += cs_etm__t32_instr_size(etmq,
- trace_chan_id, addr);
- offset--;
- }
- return addr;
+ /* Optimize calculation for non T32 */
+ if (packet->isa != CS_ETM_ISA_T32)
+ return addr + offset * 4;
+
+ while (offset) {
+ addr += cs_etm__instr_size(etmq, trace_chan_id,
+ packet->isa, addr);
+ offset--;
}
- /* Assume a 4 byte instruction size (A32/A64) */
- return packet->start_addr + offset * 4;
+ return addr;
}
static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq,
@@ -1111,16 +1131,8 @@ static void cs_etm__copy_insn(struct cs_etm_queue *etmq,
return;
}
- /*
- * T32 instruction size might be 32-bit or 16-bit, decide by calling
- * cs_etm__t32_instr_size().
- */
- if (packet->isa == CS_ETM_ISA_T32)
- sample->insn_len = cs_etm__t32_instr_size(etmq, trace_chan_id,
- sample->ip);
- /* Otherwise, A64 and A32 instruction size are always 32-bit. */
- else
- sample->insn_len = 4;
+ sample->insn_len = cs_etm__instr_size(etmq, trace_chan_id,
+ packet->isa, sample->ip);
cs_etm__mem_access(etmq, trace_chan_id, sample->ip,
sample->insn_len, (void *)sample->insn);
--
2.17.1
Powered by blists - more mailing lists