Message-Id: <20221215192817.2734573-8-namhyung@kernel.org>
Date: Thu, 15 Dec 2022 11:28:15 -0800
From: Namhyung Kim <namhyung@...nel.org>
To: Arnaldo Carvalho de Melo <acme@...nel.org>,
Jiri Olsa <jolsa@...nel.org>
Cc: Ingo Molnar <mingo@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
LKML <linux-kernel@...r.kernel.org>,
Ian Rogers <irogers@...gle.com>,
Adrian Hunter <adrian.hunter@...el.com>,
linux-perf-users@...r.kernel.org, Andi Kleen <ak@...ux.intel.com>,
Milian Wolff <milian.wolff@...b.com>,
Leo Yan <leo.yan@...aro.org>
Subject: [PATCH 7/9] perf hist: Improve srcline sort key performance
The sort_entry->cmp() will be called for every sample to find a
matching entry.  When the 'srcline' sort key is used, that means it
needs to call addr2line or libbfd every time.

This is not optimal because many samples will have the same address,
so addr2line only needs to be called once for it.  Postpone the actual
srcline comparison to sort_entry->collapse() and compare addresses in
->cmp() instead.

Also add an ->init() callback to make sure the entry has the srcline
info.  If a sample has unique data, chances are the entry can be sorted
out by other (previous) keys and the sort_srcline callbacks are never
called.
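
To illustrate the idea outside the perf tree, a minimal standalone
sketch (not perf code; struct entry, entry_cmp(), entry_collapse() and
the mock resolve_srcline() standing in for addr2line are illustrative
names only):

/*
 * Compare by address first; only resolve the expensive srcline string
 * lazily, and at most once per entry, when a later comparison stage
 * actually needs it.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry {
	unsigned long ip;	/* sample address */
	char *srcline;		/* resolved lazily, NULL until needed */
};

/* Mock of the expensive addr2line/libbfd lookup. */
static char *resolve_srcline(unsigned long ip)
{
	char buf[64];

	snprintf(buf, sizeof(buf), "file.c:%lu", ip % 100);
	return strdup(buf);
}

/* Cheap first-stage comparison: addresses only, no symbolization. */
static int entry_cmp(const struct entry *l, const struct entry *r)
{
	if (l->ip != r->ip)
		return l->ip < r->ip ? -1 : 1;
	return 0;
}

/* Second-stage comparison: resolve srcline at most once per entry. */
static int entry_collapse(struct entry *l, struct entry *r)
{
	if (!l->srcline)
		l->srcline = resolve_srcline(l->ip);
	if (!r->srcline)
		r->srcline = resolve_srcline(r->ip);
	return strcmp(l->srcline, r->srcline);
}

int main(void)
{
	struct entry a = { .ip = 0x1234 }, b = { .ip = 0x1234 };
	struct entry c = { .ip = 0x5678 };

	/* Same address: decided without any symbolization. */
	printf("cmp(a, b) = %d\n", entry_cmp(&a, &b));

	/* Different address: srcline resolved once per entry. */
	printf("collapse(a, c) = %d\n", entry_collapse(&a, &c));

	free(a.srcline);
	free(c.srcline);
	return 0;
}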
Signed-off-by: Namhyung Kim <namhyung@...nel.org>
---
tools/perf/util/sort.c | 29 +++++++++++++++++++++++++++--
1 file changed, 27 insertions(+), 2 deletions(-)
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index f6333b3dca35..913045c5b2b2 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -373,6 +373,18 @@ char *hist_entry__srcline(struct hist_entry *he)
static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ int64_t ret;
+
+ ret = _sort__addr_cmp(left->ip, right->ip);
+ if (ret)
+ return ret;
+
+ return sort__dso_cmp(left, right);
+}
+
+static int64_t
+sort__srcline_collapse(struct hist_entry *left, struct hist_entry *right)
{
if (!left->srcline)
left->srcline = hist_entry__srcline(left);
@@ -382,18 +394,31 @@ sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
return strcmp(right->srcline, left->srcline);
}
-static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
- size_t size, unsigned int width)
+static int64_t
+sort__srcline_sort(struct hist_entry *left, struct hist_entry *right)
+{
+ return sort__srcline_collapse(left, right);
+}
+
+static void
+sort__srcline_init(struct hist_entry *he)
{
if (!he->srcline)
he->srcline = hist_entry__srcline(he);
+}
+static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
}
struct sort_entry sort_srcline = {
.se_header = "Source:Line",
.se_cmp = sort__srcline_cmp,
+ .se_collapse = sort__srcline_collapse,
+ .se_sort = sort__srcline_sort,
+ .se_init = sort__srcline_init,
.se_snprintf = hist_entry__srcline_snprintf,
.se_width_idx = HISTC_SRCLINE,
};
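
For reference, the affected path is exercised when sorting report
output by source line, e.g. (illustrative commands; the workload is a
placeholder):

	perf record -g -- ./workload
	perf report -s srcline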
--
2.39.0.314.g84b9a713c41-goog