Message-ID: <20251118211326.1840989-2-irogers@google.com>
Date: Tue, 18 Nov 2025 13:13:24 -0800
From: Ian Rogers <irogers@...gle.com>
To: Peter Zijlstra <peterz@...radead.org>, Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>, Namhyung Kim <namhyung@...nel.org>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>, Jiri Olsa <jolsa@...nel.org>,
Ian Rogers <irogers@...gle.com>, Adrian Hunter <adrian.hunter@...el.com>,
"Dr. David Alan Gilbert" <linux@...blig.org>, Yang Li <yang.lee@...ux.alibaba.com>,
James Clark <james.clark@...aro.org>, Thomas Falcon <thomas.falcon@...el.com>,
Thomas Richter <tmricht@...ux.ibm.com>, linux-perf-users@...r.kernel.org,
linux-kernel@...r.kernel.org, Andi Kleen <ak@...ux.intel.com>,
Dapeng Mi <dapeng1.mi@...ux.intel.com>
Subject: [PATCH v5 1/3] perf stat: Read tool events last
When reading a metric like memory bandwidth on multiple sockets, the
additional sockets' counters are on CPUs > 0. Because counters are
read with affinity, the CPU 0 counters are read first, along with the
time from the duration_time tool event, and the counters on the later
sockets are read after that. Those counters therefore accumulate for
longer than the recorded time, which can make a later socket's
computed bandwidth larger than is physically possible for the period.
To avoid this, move the reading of tool events so that it happens
after all other events have been read.

Signed-off-by: Ian Rogers <irogers@...gle.com>
---
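Not part of the patch: a minimal standalone sketch of the skew, with
made-up numbers (the 1s duration, the 5ms read gap and the 100 GB/s
peak are hypothetical, purely for illustration).

#include <stdio.h>

int main(void)
{
	/* duration_time is read early, on CPU 0. */
	double duration_s = 1.000;
	/* Socket 1's counter is read this much later... */
	double gap_s = 0.005;
	/* ...so it keeps counting past the recorded duration. */
	double peak_gbs = 100.0; /* socket 1's physical peak, GB/s */
	double bytes_gb = peak_gbs * (duration_s + gap_s);

	/* The metric divides by the shorter, earlier duration and
	 * reports ~100.5 GB/s from a 100 GB/s socket. */
	printf("reported %.2f GB/s, physical peak %.2f GB/s\n",
	       bytes_gb / duration_s, peak_gbs);
	return 0;
}

Reading the tool events last makes the divisor at least as large as
any counter's enabled time, so the reported rate can no longer exceed
the physical one. The skew is easiest to see with a per-socket metric
read, e.g. something like "perf stat -a --per-socket -M <bandwidth
metric> sleep 1" (the metric name varies by platform).
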
 tools/perf/builtin-stat.c | 45 +++++++++++++++++++++++++++++++++------
 1 file changed, 39 insertions(+), 6 deletions(-)

diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index ca1c80c141b6..5c06e9b61821 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -366,7 +366,7 @@ static int read_counter_cpu(struct evsel *counter, int cpu_map_idx)
 	return 0;
 }
 
-static int read_affinity_counters(void)
+static int read_counters_with_affinity(void)
 {
 	struct evlist_cpu_iterator evlist_cpu_itr;
 	struct affinity saved_affinity, *affinity;
@@ -387,6 +387,9 @@
 		if (evsel__is_bpf(counter))
 			continue;
 
+		if (evsel__is_tool(counter))
+			continue;
+
 		if (!counter->err)
 			counter->err = read_counter_cpu(counter, evlist_cpu_itr.cpu_map_idx);
 	}
@@ -412,16 +415,46 @@ static int read_bpf_map_counters(void)
 	return 0;
 }
 
-static int read_counters(void)
+static int read_tool_counters(void)
 {
-	if (!stat_config.stop_read_counter) {
-		if (read_bpf_map_counters() ||
-		    read_affinity_counters())
-			return -1;
+	struct evsel *counter;
+
+	evlist__for_each_entry(evsel_list, counter) {
+		int idx;
+
+		if (!evsel__is_tool(counter))
+			continue;
+
+		perf_cpu_map__for_each_idx(idx, counter->core.cpus) {
+			if (!counter->err)
+				counter->err = read_counter_cpu(counter, idx);
+		}
 	}
 	return 0;
 }
 
+static int read_counters(void)
+{
+	int ret;
+
+	if (stat_config.stop_read_counter)
+		return 0;
+
+	// Read all BPF counters first.
+	ret = read_bpf_map_counters();
+	if (ret)
+		return ret;
+
+	// Read non-BPF and non-tool counters next.
+	ret = read_counters_with_affinity();
+	if (ret)
+		return ret;
+
+	// Read the tool counters last. This way the duration_time counter
+	// should always be greater than any other counter's enabled time.
+	return read_tool_counters();
+}
+
 static void process_counters(void)
 {
 	struct evsel *counter;
--
2.52.0.rc1.455.g30608eb744-goog