Message-ID: <20181031142703.GK10660@kernel.org>
Date: Wed, 31 Oct 2018 11:27:03 -0300
From: Arnaldo Carvalho de Melo <acme@...nel.org>
To: Adrian Hunter <adrian.hunter@...el.com>
Cc: Jiri Olsa <jolsa@...hat.com>, Andi Kleen <ak@...ux.intel.com>,
linux-kernel@...r.kernel.org, leo.yan@...aro.org,
David Miller <davem@...emloft.net>,
Mathieu Poirier <mathieu.poirier@...aro.org>
Subject: Re: [PATCH 4/5] perf intel-pt: Insert callchain context into
synthesized callchains
Em Wed, Oct 31, 2018 at 04:20:45PM +0200, Adrian Hunter escreveu:
> On 31/10/18 4:15 PM, Adrian Hunter wrote:
> > On 31/10/18 3:28 PM, Arnaldo Carvalho de Melo wrote:
> >> Em Wed, Oct 31, 2018 at 11:10:42AM +0200, Adrian Hunter escreveu:
> >>> In the absence of a fallback, callchains must also encode the callchain
> >>> context. Do that now that there is no fallback.
> >>
> >> So, this one is independent of the first 3 patches, right?
> >
> > Yes. I was just going to test it separately when I noticed I had
> > screwed up my earlier testing. When I re-tested I discovered this patch
> > has an off-by-one error:
> >
> > diff --git a/tools/perf/util/thread-stack.c b/tools/perf/util/thread-stack.c
> > index afdf36852ac8..61a4286a74dc 100644
> > --- a/tools/perf/util/thread-stack.c
> > +++ b/tools/perf/util/thread-stack.c
> > @@ -337,7 +337,7 @@ void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
> >
> > last_context = context;
> >
> > - for (i = 2, j = 0; i < sz && j < thread->ts->cnt; i++, j++) {
> > + for (i = 2, j = 1; i < sz && j <= thread->ts->cnt; i++, j++) {
> > ip = thread->ts->stack[thread->ts->cnt - j].ret_addr;
> > context = callchain_context(ip, kernel_start);
> > if (context != last_context) {
> >
> > Shall I send V2?
>
> I have attached it
Thanks!
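
For the record, a minimal sketch of the indexing behind that fix (assuming,
as in thread-stack.c, that the cnt saved return addresses live at
stack[0]..stack[cnt - 1] with the newest entry last): with j starting at 0
the first read is stack[cnt - 0] = stack[cnt], one slot past the newest
valid entry, and since the loop stopped at j < cnt it never reached
stack[0], the oldest frame. Starting j at 1 and allowing j == cnt walks
from stack[cnt - 1] (newest) down to stack[0] (oldest):

	/* illustration only, not the perf code itself */
	for (i = 2, j = 1; i < sz && j <= cnt; i++, j++)
		ip = stack[cnt - j];	/* j == 1 -> newest, j == cnt -> oldest */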
> >From 19a9fbdbd3729c7f14c9c1f0ac79c18cce91ff60 Mon Sep 17 00:00:00 2001
> From: Adrian Hunter <adrian.hunter@...el.com>
> Date: Wed, 31 Oct 2018 09:31:25 +0200
> Subject: [PATCH V2 4/5] perf intel-pt: Insert callchain context into
> synthesized callchains
>
> In the absence of a fallback, callchains must also encode the callchain
> context. Do that now that there is no fallback.
>
> Signed-off-by: Adrian Hunter <adrian.hunter@...el.com>
> Cc: stable@...r.kernel.org # 4.19
> ---
>
>
> Changes in V2:
>
> Fix off-by-one error
>
>
> tools/perf/util/intel-pt.c | 6 +++--
> tools/perf/util/thread-stack.c | 44 +++++++++++++++++++++++++++-------
> tools/perf/util/thread-stack.h | 2 +-
> 3 files changed, 40 insertions(+), 12 deletions(-)
>
> diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
> index ffa385a029b3..60732213d16a 100644
> --- a/tools/perf/util/intel-pt.c
> +++ b/tools/perf/util/intel-pt.c
> @@ -759,7 +759,8 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
> if (pt->synth_opts.callchain) {
> size_t sz = sizeof(struct ip_callchain);
>
> - sz += pt->synth_opts.callchain_sz * sizeof(u64);
> + /* Add 1 to callchain_sz for callchain context */
> + sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64);
> ptq->chain = zalloc(sz);
> if (!ptq->chain)
> goto out_free;
> @@ -1160,7 +1161,8 @@ static void intel_pt_prep_sample(struct intel_pt *pt,
>
> if (pt->synth_opts.callchain) {
> thread_stack__sample(ptq->thread, ptq->chain,
> - pt->synth_opts.callchain_sz, sample->ip);
> + pt->synth_opts.callchain_sz + 1,
> + sample->ip, pt->kernel_start);
> sample->callchain = ptq->chain;
> }
>
> diff --git a/tools/perf/util/thread-stack.c b/tools/perf/util/thread-stack.c
> index c091635bf7dc..61a4286a74dc 100644
> --- a/tools/perf/util/thread-stack.c
> +++ b/tools/perf/util/thread-stack.c
> @@ -310,20 +310,46 @@ void thread_stack__free(struct thread *thread)
> }
> }
>
> +static inline u64 callchain_context(u64 ip, u64 kernel_start)
> +{
> + return ip < kernel_start ? PERF_CONTEXT_USER : PERF_CONTEXT_KERNEL;
> +}
> +
> void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
> - size_t sz, u64 ip)
> + size_t sz, u64 ip, u64 kernel_start)
> {
> - size_t i;
> + u64 context = callchain_context(ip, kernel_start);
> + u64 last_context;
> + size_t i, j;
>
> - if (!thread || !thread->ts)
> - chain->nr = 1;
> - else
> - chain->nr = min(sz, thread->ts->cnt + 1);
> + if (sz < 2) {
> + chain->nr = 0;
> + return;
> + }
>
> - chain->ips[0] = ip;
> + chain->ips[0] = context;
> + chain->ips[1] = ip;
> +
> + if (!thread || !thread->ts) {
> + chain->nr = 2;
> + return;
> + }
> +
> + last_context = context;
> +
> + for (i = 2, j = 1; i < sz && j <= thread->ts->cnt; i++, j++) {
> + ip = thread->ts->stack[thread->ts->cnt - j].ret_addr;
> + context = callchain_context(ip, kernel_start);
> + if (context != last_context) {
> + if (i >= sz - 1)
> + break;
> + chain->ips[i++] = context;
> + last_context = context;
> + }
> + chain->ips[i] = ip;
> + }
>
> - for (i = 1; i < chain->nr; i++)
> - chain->ips[i] = thread->ts->stack[thread->ts->cnt - i].ret_addr;
> + chain->nr = i;
> }
>
> struct call_return_processor *
> diff --git a/tools/perf/util/thread-stack.h b/tools/perf/util/thread-stack.h
> index b7e41c4ebfdd..f97c00a8c251 100644
> --- a/tools/perf/util/thread-stack.h
> +++ b/tools/perf/util/thread-stack.h
> @@ -84,7 +84,7 @@ int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
> u64 to_ip, u16 insn_len, u64 trace_nr);
> void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr);
> void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
> - size_t sz, u64 ip);
> + size_t sz, u64 ip, u64 kernel_start);
> int thread_stack__flush(struct thread *thread);
> void thread_stack__free(struct thread *thread);
> size_t thread_stack__depth(struct thread *thread);
> --
> 2.17.1
>
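For anyone reading this in the archives, here is a minimal standalone
sketch of the context-insertion loop in the patched thread_stack__sample(),
with toy stand-ins for PERF_CONTEXT_USER / PERF_CONTEXT_KERNEL, struct
thread and the thread stack, so it illustrates the logic rather than being
the perf code itself:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Toy stand-ins for the PERF_CONTEXT_* sentinels */
#define CTX_USER	0xfffffffffffffe00ULL
#define CTX_KERNEL	0xffffffffffffff80ULL

static uint64_t callchain_context(uint64_t ip, uint64_t kernel_start)
{
	return ip < kernel_start ? CTX_USER : CTX_KERNEL;
}

/*
 * Mimics the patched thread_stack__sample(): stack[] holds cnt return
 * addresses (oldest first), ips[] receives at most sz entries including
 * the inserted context markers.  Returns the number of entries written.
 */
static size_t sample(const uint64_t *stack, size_t cnt, uint64_t ip,
		     uint64_t kernel_start, uint64_t *ips, size_t sz)
{
	uint64_t context = callchain_context(ip, kernel_start);
	uint64_t last_context = context;
	size_t i, j;

	if (sz < 2)
		return 0;

	ips[0] = context;	/* context of the sampled ip itself */
	ips[1] = ip;

	/* Walk the stack newest first: stack[cnt - 1], stack[cnt - 2], ... */
	for (i = 2, j = 1; i < sz && j <= cnt; i++, j++) {
		uint64_t ret = stack[cnt - j];

		context = callchain_context(ret, kernel_start);
		if (context != last_context) {
			if (i >= sz - 1)
				break;
			ips[i++] = context;	/* context switch: insert marker */
			last_context = context;
		}
		ips[i] = ret;
	}

	return i;
}

int main(void)
{
	uint64_t kernel_start = 0xffff000000000000ULL;
	/* oldest first: two user frames, then the caller of the sampled kernel ip */
	uint64_t stack[] = { 0x400100, 0x400200, 0xffff000000001000ULL };
	uint64_t ips[16];
	size_t nr, i;

	/* the sampled ip itself is a kernel address */
	nr = sample(stack, 3, 0xffff000000002000ULL, kernel_start, ips, 16);

	for (i = 0; i < nr; i++)
		printf("ips[%zu] = 0x%llx\n", i, (unsigned long long)ips[i]);

	return 0;
}

Built and run as-is, it prints the kernel context marker, the sampled
kernel ip and the kernel return address, then a user context marker
followed by the two user return addresses, i.e. the interleaving the
patch synthesizes.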