Message-Id: <20221114230227.1255976-20-namhyung@kernel.org>
Date: Mon, 14 Nov 2022 15:02:27 -0800
From: Namhyung Kim <namhyung@...nel.org>
To: Arnaldo Carvalho de Melo <acme@...nel.org>,
Jiri Olsa <jolsa@...nel.org>
Cc: Ingo Molnar <mingo@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
LKML <linux-kernel@...r.kernel.org>,
Ian Rogers <irogers@...gle.com>,
Adrian Hunter <adrian.hunter@...el.com>,
linux-perf-users@...r.kernel.org,
Kan Liang <kan.liang@...ux.intel.com>,
Zhengjun Xing <zhengjun.xing@...ux.intel.com>,
James Clark <james.clark@....com>,
Athira Jajeev <atrajeev@...ux.vnet.ibm.com>
Subject: [PATCH 19/19] perf stat: Add print_aggr_cgroup() for --for-each-cgroup and --topdown

Normally, --for-each-cgroup only works with AGGR_GLOBAL.  However,
--topdown on some CPUs (e.g. Intel Skylake) converts it to AGGR_CORE
internally.

To support those machines, add print_aggr_cgroup() and handle the
events like in print_cgroup_events().

  $ perf stat -a --for-each-cgroup system.slice,user.slice --topdown sleep 1
  nmi_watchdog enabled with topdown. May give wrong results.
  Disable with echo 0 > /proc/sys/kernel/nmi_watchdog

   Performance counter stats for 'system wide':

                retiring    bad speculation    frontend bound    backend bound
  S0-D0-C0 2 system.slice 49.0% -46.6% 31.4%
  S0-D0-C1 2 system.slice 55.5% 8.0% 45.5% -9.0%
  S0-D0-C2 2 system.slice 87.8% 22.1% 30.3% -40.3%
  S0-D0-C3 2 system.slice 53.3% -11.9% 45.2% 13.4%
  S0-D0-C0 2 user.slice 123.5% 4.0% 48.5% -75.9%
  S0-D0-C1 2 user.slice 19.9% 6.5% 89.9% -16.3%
  S0-D0-C2 2 user.slice 29.9% 7.9% 71.3% -9.1%
  S0-D0-C3 2 user.slice 28.0% 7.2% 43.3% 21.5%

        1.004136937 seconds time elapsed
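
For readers less familiar with the stat-display code, the iteration order of
the new function can be illustrated with a small standalone program.  This is
only a sketch: struct toy_counter, the sample values and nr_aggr are made up
for the example, and only the loop nesting mirrors print_aggr_cgroup() in the
diff below (visit each distinct cgroup once, then each aggregation entry, then
every counter that belongs to that cgroup).

#include <stdio.h>
#include <string.h>

/* Toy stand-in for the real perf counter data; not part of the patch. */
struct toy_counter {
	const char *cgrp;	/* cgroup the event was opened for     */
	const char *event;	/* event name                          */
	double val[2];		/* one value per aggregation id (core) */
};

int main(void)
{
	/* Counters are adjacent per cgroup, as they are in the evlist. */
	struct toy_counter counters[] = {
		{ "system.slice", "retiring",      { 10.0, 20.0 } },
		{ "system.slice", "backend bound", { 30.0, 40.0 } },
		{ "user.slice",   "retiring",      { 50.0, 60.0 } },
		{ "user.slice",   "backend bound", { 70.0, 80.0 } },
	};
	int nr_counters = sizeof(counters) / sizeof(counters[0]);
	int nr_aggr = 2;		/* e.g. two cores */
	const char *cgrp = NULL;

	/* Outer loop: visit each distinct cgroup once. */
	for (int i = 0; i < nr_counters; i++) {
		if (cgrp && !strcmp(cgrp, counters[i].cgrp))
			continue;
		cgrp = counters[i].cgrp;

		/* Middle loop: one block per aggregation entry. */
		for (int s = 0; s < nr_aggr; s++) {
			printf("S0-D0-C%d  %s\n", s, cgrp);

			/* Inner loop: counters belonging to this cgroup. */
			for (int j = 0; j < nr_counters; j++) {
				if (strcmp(counters[j].cgrp, cgrp))
					continue;
				printf("    %-14s %6.1f%%\n",
				       counters[j].event,
				       counters[j].val[s]);
			}
		}
	}
	return 0;
}

Built with a plain C compiler, it prints one block per cgroup and aggregation
id, matching the shape of the --for-each-cgroup output above.
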
Signed-off-by: Namhyung Kim <namhyung@...nel.org>
---
 tools/perf/util/stat-display.c | 41 +++++++++++++++++++++++++++++++++-
 1 file changed, 40 insertions(+), 1 deletion(-)

diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index cf25ed99b5df..f5501760ff2e 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -900,6 +900,42 @@ static void print_aggr(struct perf_stat_config *config,
 	}
 }
 
+static void print_aggr_cgroup(struct perf_stat_config *config,
+			      struct evlist *evlist,
+			      char *prefix)
+{
+	bool metric_only = config->metric_only;
+	struct evsel *counter, *evsel;
+	struct cgroup *cgrp = NULL;
+	int s;
+
+	if (!config->aggr_map || !config->aggr_get_id)
+		return;
+
+	evlist__for_each_entry(evlist, evsel) {
+		if (cgrp == evsel->cgrp)
+			continue;
+
+		cgrp = evsel->cgrp;
+
+		for (s = 0; s < config->aggr_map->nr; s++) {
+			print_metric_begin(config, evlist, prefix, s, cgrp);
+
+			evlist__for_each_entry(evlist, counter) {
+				if (counter->merged_stat)
+					continue;
+
+				if (counter->cgrp != cgrp)
+					continue;
+
+				print_counter_aggrdata(config, counter, s, prefix,
+						       metric_only);
+			}
+			print_metric_end(config);
+		}
+	}
+}
+
 static void print_counter(struct perf_stat_config *config,
 			  struct evsel *counter, char *prefix)
 {
@@ -1361,7 +1397,10 @@ void evlist__print_counters(struct evlist *evlist, struct perf_stat_config *conf
 	case AGGR_DIE:
 	case AGGR_SOCKET:
 	case AGGR_NODE:
-		print_aggr(config, evlist, prefix);
+		if (config->cgroup_list)
+			print_aggr_cgroup(config, evlist, prefix);
+		else
+			print_aggr(config, evlist, prefix);
 		break;
 	case AGGR_THREAD:
 	case AGGR_GLOBAL:
--
2.38.1.493.g58b659f92b-goog