[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20170517042645.GB32691@danjae.aot.lge.com>
Date: Wed, 17 May 2017 13:26:45 +0900
From: Namhyung Kim <namhyung@...nel.org>
To: Milian Wolff <milian.wolff@...b.com>
Cc: Linux-kernel@...r.kernel.org, linux-perf-users@...r.kernel.org,
Arnaldo Carvalho de Melo <acme@...hat.com>,
David Ahern <dsahern@...il.com>,
Peter Zijlstra <a.p.zijlstra@...llo.nl>,
Yao Jin <yao.jin@...ux.intel.com>, kernel-team@....com
Subject: Re: [PATCH] perf report: always honor callchain order for inlined
nodes
On Tue, May 16, 2017 at 11:54:22PM +0200, Milian Wolff wrote:
> So far, the inlined nodes were only reversed when we built perf
> against libbfd. If that was not available, the addr2line fallback
> code path was missing the inline_list__reverse call.
>
> Now this is done at the central place inside dso__parse_addr_inlines.
What about changing inline_list__append() instead to honor callchain
order?
if (ORDER_CALLEE)
list_add_tail(...);
else
list_add(...);
Thanks,
Namhyung
>
> Cc: Arnaldo Carvalho de Melo <acme@...hat.com>
> Cc: David Ahern <dsahern@...il.com>
> Cc: Namhyung Kim <namhyung@...nel.org>
> Cc: Peter Zijlstra <a.p.zijlstra@...llo.nl>
> Cc: Yao Jin <yao.jin@...ux.intel.com>
> Signed-off-by: Milian Wolff <milian.wolff@...b.com>
> ---
> tools/perf/util/srcline.c | 28 ++++++++++++++--------------
> 1 file changed, 14 insertions(+), 14 deletions(-)
>
> diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
> index 62cf42c36955..8df6b29bf984 100644
> --- a/tools/perf/util/srcline.c
> +++ b/tools/perf/util/srcline.c
> @@ -61,6 +61,14 @@ static int inline_list__append(char *filename, char *funcname, int line_nr,
> return 0;
> }
>
> +static void inline_list__reverse(struct inline_node *node)
> +{
> + struct inline_list *ilist, *n;
> +
> + list_for_each_entry_safe_reverse(ilist, n, &node->val, list)
> + list_move_tail(&ilist->list, &node->val);
> +}
> +
> #ifdef HAVE_LIBBFD_SUPPORT
>
> /*
> @@ -200,14 +208,6 @@ static void addr2line_cleanup(struct a2l_data *a2l)
>
> #define MAX_INLINE_NEST 1024
>
> -static void inline_list__reverse(struct inline_node *node)
> -{
> - struct inline_list *ilist, *n;
> -
> - list_for_each_entry_safe_reverse(ilist, n, &node->val, list)
> - list_move_tail(&ilist->list, &node->val);
> -}
> -
> static int addr2line(const char *dso_name, u64 addr,
> char **file, unsigned int *line, struct dso *dso,
> bool unwind_inlines, struct inline_node *node)
> @@ -245,11 +245,6 @@ static int addr2line(const char *dso_name, u64 addr,
> return 0;
> }
> }
> -
> - if ((node != NULL) &&
> - (callchain_param.order != ORDER_CALLEE)) {
> - inline_list__reverse(node);
> - }
> }
>
> if (a2l->found && a2l->filename) {
> @@ -493,12 +488,17 @@ char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
> struct inline_node *dso__parse_addr_inlines(struct dso *dso, u64 addr)
> {
> const char *dso_name;
> + struct inline_node *node;
>
> dso_name = dso__name(dso);
> if (dso_name == NULL)
> return NULL;
>
> - return addr2inlines(dso_name, addr, dso);
> + node = addr2inlines(dso_name, addr, dso);
> + if (node && callchain_param.order != ORDER_CALLEE)
> + inline_list__reverse(node);
> +
> + return node;
> }
>
> void inline_node__delete(struct inline_node *node)
> --
> 2.13.0
>
Powered by blists - more mailing lists