Message-Id: <1434576155-30038-6-git-send-email-acme@kernel.org>
Date: Wed, 17 Jun 2015 18:22:32 -0300
From: Arnaldo Carvalho de Melo <acme@...nel.org>
To: Ingo Molnar <mingo@...nel.org>
Cc: linux-kernel@...r.kernel.org,
Masami Hiramatsu <masami.hiramatsu.pt@...achi.com>,
David Ahern <dsahern@...il.com>, Jiri Olsa <jolsa@...hat.com>,
Namhyung Kim <namhyung@...nel.org>,
Naohiro Aota <naota@...sp.net>,
Peter Zijlstra <peterz@...radead.org>,
Arnaldo Carvalho de Melo <acme@...hat.com>
Subject: [PATCH 5/8] perf probe: Speed up perf probe --list by caching debuginfo
From: Masami Hiramatsu <masami.hiramatsu.pt@...achi.com>
Speed up the "perf probe --list" by caching the last used debuginfo.
"perf probe --list" always opens and loads debuginfo for each entry of the
probe list. This takes a very long time.
E.g. with vfs_* events (total 96 probes)
[root@...alhost perf]# time ./perf probe -l &> /dev/null
real 0m25.376s
user 0m24.381s
sys 0m1.012s
To solve this issue, this adds a debuginfo_cache which keeps the
last used debuginfo in memory.
With this fix, "perf probe --list" becomes significantly
faster.
[root@...alhost perf]# time ./perf probe -l &> /dev/null
real 0m0.161s
user 0m0.136s
sys 0m0.025s
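Not part of the patch, just a side note for readers skimming the list: the
speedup comes from a single-entry cache keyed by the module path, reopened
only when the key changes and torn down once via debuginfo_cache__exit()
after the listing loop. Below is a minimal self-contained sketch of that
pattern; open_debuginfo_stub/delete_debuginfo_stub and the module path are
hypothetical stand-ins for illustration, not the probe-event.c code itself.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Hypothetical stand-ins for the real debuginfo handle and its open/delete
 * helpers; here they only track a name so the sketch stays self-contained.
 */
struct debuginfo {
	char *name;
};

static struct debuginfo *open_debuginfo_stub(const char *module)
{
	struct debuginfo *d = malloc(sizeof(*d));

	if (!d)
		return NULL;
	d->name = strdup(module ? module : "kernel");
	if (!d->name) {
		free(d);
		return NULL;
	}
	printf("(expensive) loading debuginfo for %s\n", d->name);
	return d;
}

static void delete_debuginfo_stub(struct debuginfo *d)
{
	if (d) {
		free(d->name);
		free(d);
	}
}

/* Single-entry cache: the last handle plus the key it was opened for */
static struct debuginfo *cache;
static char *cache_key;

static struct debuginfo *cache_open(const char *module)
{
	/* Hit: same module path (or both NULL, i.e. the kernel) as last time */
	if ((cache_key && module && !strcmp(cache_key, module)) ||
	    (!cache_key && !module && cache))
		return cache;

	/* Miss: drop the old entry, remember the new key, reopen */
	delete_debuginfo_stub(cache);
	free(cache_key);
	cache_key = module ? strdup(module) : NULL;
	cache = open_debuginfo_stub(module);
	return cache;
}

static void cache_exit(void)
{
	delete_debuginfo_stub(cache);
	cache = NULL;
	free(cache_key);
	cache_key = NULL;
}

int main(void)
{
	int i;

	/* 96 kernel probes: only the first call pays the open/load cost */
	for (i = 0; i < 96; i++)
		cache_open(NULL);
	/* switching to another object reopens once */
	cache_open("/path/to/some_module.ko");
	cache_exit();
	return 0;
}

A single cached entry is enough in practice: a run like the vfs_* example
above resolves all 96 probes against the same kernel debuginfo, so only the
first lookup pays the open/load cost.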
Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@...achi.com>
Tested-by: Arnaldo Carvalho de Melo <acme@...hat.com>
Cc: David Ahern <dsahern@...il.com>
Cc: Jiri Olsa <jolsa@...hat.com>
Cc: Namhyung Kim <namhyung@...nel.org>
Cc: Naohiro Aota <naota@...sp.net>
Cc: Peter Zijlstra <peterz@...radead.org>
Link: http://lkml.kernel.org/r/20150617145854.19715.15314.stgit@localhost.localdomain
Signed-off-by: Arnaldo Carvalho de Melo <acme@...hat.com>
---
tools/perf/util/probe-event.c | 48 +++++++++++++++++++++++++++++++++++++++----
1 file changed, 44 insertions(+), 4 deletions(-)
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 65a1c8252270..076527b639bd 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -429,6 +429,41 @@ static struct debuginfo *open_debuginfo(const char *module, bool silent)
return ret;
}
+/* For caching the last debuginfo */
+static struct debuginfo *debuginfo_cache;
+static char *debuginfo_cache_path;
+
+static struct debuginfo *debuginfo_cache__open(const char *module, bool silent)
+{
+ if ((debuginfo_cache_path && !strcmp(debuginfo_cache_path, module)) ||
+ (!debuginfo_cache_path && !module && debuginfo_cache))
+ goto out;
+
+ /* Copy module path */
+ free(debuginfo_cache_path);
+ if (module) {
+ debuginfo_cache_path = strdup(module);
+ if (!debuginfo_cache_path) {
+ debuginfo__delete(debuginfo_cache);
+ debuginfo_cache = NULL;
+ goto out;
+ }
+ }
+
+ debuginfo_cache = open_debuginfo(module, silent);
+ if (!debuginfo_cache)
+ zfree(&debuginfo_cache_path);
+out:
+ return debuginfo_cache;
+}
+
+static void debuginfo_cache__exit(void)
+{
+ debuginfo__delete(debuginfo_cache);
+ debuginfo_cache = NULL;
+ zfree(&debuginfo_cache_path);
+}
+
static int get_text_start_address(const char *exec, unsigned long *address)
{
@@ -490,12 +525,11 @@ static int find_perf_probe_point_from_dwarf(struct probe_trace_point *tp,
pr_debug("try to find information at %" PRIx64 " in %s\n", addr,
tp->module ? : "kernel");
- dinfo = open_debuginfo(tp->module, verbose == 0);
- if (dinfo) {
+ dinfo = debuginfo_cache__open(tp->module, verbose == 0);
+ if (dinfo)
ret = debuginfo__find_probe_point(dinfo,
(unsigned long)addr, pp);
- debuginfo__delete(dinfo);
- } else
+ else
ret = -ENOENT;
if (ret > 0) {
@@ -930,6 +964,10 @@ out:
#else /* !HAVE_DWARF_SUPPORT */
+static void debuginfo_cache__exit(void)
+{
+}
+
static int
find_perf_probe_point_from_dwarf(struct probe_trace_point *tp __maybe_unused,
struct perf_probe_point *pp __maybe_unused,
@@ -2266,6 +2304,8 @@ next:
break;
}
strlist__delete(rawlist);
+ /* Cleanup cached debuginfo if needed */
+ debuginfo_cache__exit();
return ret;
}
--
2.1.0