Date:	Thu, 31 Oct 2013 15:56:10 +0900
From:	Namhyung Kim <namhyung@...nel.org>
To:	Arnaldo Carvalho de Melo <acme@...stprotocols.net>
Cc:	Peter Zijlstra <a.p.zijlstra@...llo.nl>,
	Paul Mackerras <paulus@...ba.org>,
	Ingo Molnar <mingo@...nel.org>,
	Namhyung Kim <namhyung.kim@....com>,
	LKML <linux-kernel@...r.kernel.org>,
	Frederic Weisbecker <fweisbec@...il.com>,
	Stephane Eranian <eranian@...gle.com>,
	Jiri Olsa <jolsa@...hat.com>,
	Rodrigo Campos <rodrigo@...g.com.ar>,
	Arun Sharma <asharma@...com>
Subject: [PATCH 08/14] perf report: Cache cumulative callchains

From: Namhyung Kim <namhyung.kim@....com>

It is possible that a callchain has cycles or recursive calls.  In
that case it'll end up having entries with more than 100% overhead in
the output, since a function that appears several times in a single
callchain would otherwise be cumulated once per appearance.  In order
to prevent such entries, cache each callchain node and skip it if the
same entry has already been cumulated.

Cc: Arun Sharma <asharma@...com>
Cc: Frederic Weisbecker <fweisbec@...il.com>
Signed-off-by: Namhyung Kim <namhyung@...nel.org>
---
 tools/perf/builtin-report.c | 49 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 49 insertions(+)

diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 1b152a8b7f51..1a0de9a4a568 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -387,6 +387,11 @@ iter_finish_normal_entry(struct add_entry_iter *iter, struct addr_location *al)
 	return err;
 }
 
+struct cumulative_cache {
+	struct dso *dso;
+	struct symbol *sym;
+};
+
 static int
 iter_prepare_cumulative_entry(struct add_entry_iter *iter,
 			      struct machine *machine,
@@ -394,9 +399,31 @@ iter_prepare_cumulative_entry(struct add_entry_iter *iter,
 			      struct addr_location *al __maybe_unused,
 			      struct perf_sample *sample)
 {
+	struct callchain_cursor_node *node;
+	struct cumulative_cache *ccache;
+
 	callchain_cursor_commit(&callchain_cursor);
 
 	/*
+	 * This is for detecting cycles or recursive calls so that
+	 * they're cumulated only once, preventing entries from
+	 * exceeding 100% overhead.
+	 */
+	node = callchain_cursor_current(&callchain_cursor);
+	if (node == NULL)
+		return 0;
+
+	ccache = malloc(sizeof(*ccache) * PERF_MAX_STACK_DEPTH);
+	if (ccache == NULL)
+		return -ENOMEM;
+
+	ccache[0].dso = node->map->dso;
+	ccache[0].sym = node->sym;
+
+	iter->priv = ccache;
+	iter->curr = 1;
+
+	/*
 	 * The first callchain node always contains same information
 	 * as a hist entry itself.  So skip it in order to prevent
 	 * double accounting.
@@ -501,8 +528,29 @@ iter_add_next_cumulative_entry(struct add_entry_iter *iter,
 {
 	struct perf_evsel *evsel = iter->evsel;
 	struct perf_sample *sample = iter->sample;
+	struct cumulative_cache *ccache = iter->priv;
 	struct hist_entry *he;
 	int err = 0;
+	int i;
+
+	/*
+	 * Check if there are duplicate entries in the callchain.
+	 * It's possible that it has cycles or recursive calls.
+	 */
+	for (i = 0; i < iter->curr; i++) {
+		if (sort__has_sym) {
+			if (ccache[i].sym == al->sym)
+				return 0;
+		} else {
+			/* Not much we can do - just compare the dso. */
+			if (ccache[i].dso == al->map->dso)
+				return 0;
+		}
+	}
+
+	ccache[i].dso = al->map->dso;
+	ccache[i].sym = al->sym;
+	iter->curr++;
 
 	he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL,
 				sample->period, sample->weight,
@@ -538,6 +586,7 @@ iter_finish_cumulative_entry(struct add_entry_iter *iter,
 	evsel->hists.stats.total_period += sample->period;
 	hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
 
+	free(iter->priv);
 	return 0;
 }
 
-- 
1.7.11.7
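
As a minimal, self-contained sketch of the deduplication idea above:
the program below walks one sample's callchain and remembers each
(dso, sym) pair that has already been cumulated, so a frame repeated
by recursion or a cycle is counted only once.  The struct frame type,
MAX_STACK_DEPTH and the strcmp()-based comparison are simplified
stand-ins for illustration; the actual patch stores and compares
perf's canonical struct dso / struct symbol pointers and is driven by
the add_entry_iter callbacks shown in the diff.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_STACK_DEPTH 127	/* stand-in for PERF_MAX_STACK_DEPTH */

struct frame {			/* simplified callchain node */
	const char *dso;
	const char *sym;
};

struct cumulative_cache {	/* mirrors the struct added by the patch */
	const char *dso;
	const char *sym;
};

/* Return 1 if this frame was already cumulated for the current sample. */
static int seen_before(const struct cumulative_cache *cache, int nr,
		       const struct frame *f, int have_sym)
{
	int i;

	for (i = 0; i < nr; i++) {
		if (have_sym) {
			if (!strcmp(cache[i].sym, f->sym))
				return 1;
		} else {
			/* No symbols - best we can do is compare the dso. */
			if (!strcmp(cache[i].dso, f->dso))
				return 1;
		}
	}
	return 0;
}

int main(void)
{
	/* One sample's callchain with direct recursion in bar(). */
	struct frame chain[] = {
		{ "/usr/bin/app",       "foo"    },
		{ "/usr/bin/app",       "bar"    },
		{ "/usr/bin/app",       "bar"    },	/* recursive frame */
		{ "/usr/lib/libc.so.6", "malloc" },
	};
	struct cumulative_cache *cache;
	int nr_cached = 0;
	unsigned int i;

	cache = calloc(MAX_STACK_DEPTH, sizeof(*cache));
	if (cache == NULL)
		return 1;

	for (i = 0; i < sizeof(chain) / sizeof(chain[0]); i++) {
		if (seen_before(cache, nr_cached, &chain[i], 1)) {
			printf("skip  %-8s (already cumulated)\n",
			       chain[i].sym);
			continue;
		}

		cache[nr_cached].dso = chain[i].dso;
		cache[nr_cached].sym = chain[i].sym;
		nr_cached++;

		/* Here perf would add or merge a hist entry for the frame. */
		printf("count %-8s in %s\n", chain[i].sym, chain[i].dso);
	}

	free(cache);
	return 0;
}

Running it prints one "count" line per unique frame and a "skip" line
for the repeated bar() frame, mirroring how the patched
iter_add_next_cumulative_entry() returns early when the cache already
holds the entry.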

