Message-ID: <20251104053449.1208800-1-irogers@google.com>
Date: Mon, 3 Nov 2025 21:34:47 -0800
From: Ian Rogers <irogers@...gle.com>
To: Peter Zijlstra <peterz@...radead.org>, Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>, Namhyung Kim <namhyung@...nel.org>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>, Jiri Olsa <jolsa@...nel.org>,
Ian Rogers <irogers@...gle.com>, Adrian Hunter <adrian.hunter@...el.com>,
"Dr. David Alan Gilbert" <linux@...blig.org>, Yang Li <yang.lee@...ux.alibaba.com>,
James Clark <james.clark@...aro.org>, Thomas Falcon <thomas.falcon@...el.com>,
Thomas Richter <tmricht@...ux.ibm.com>, linux-perf-users@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v1 1/3] perf tool_pmu: Use old_count when computing count
values for time events

When running in interval mode, every third count of a time event isn't
shown properly:
```
$ perf stat -e duration_time -a -I 1000
1.001082862 1,002,290,425 duration_time
2.004264262 1,003,183,516 duration_time
3.007381401 <not counted> duration_time
4.011160141 1,003,705,631 duration_time
5.014515385 1,003,290,110 duration_time
6.018539680 <not counted> duration_time
7.022065321 1,003,591,720 duration_time
```
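For context, the "<not counted>" lines are a consequence of how interval
mode evaluates each read: the displayed value is the delta against the
previous read, and a zero enabled/running delta is treated like an event
that never got to run. A minimal standalone sketch of that check (not
perf's actual code: struct counts, counts_delta() and count_has_error()
are simplified stand-ins for the perf_counts_values handling and the
evsel__count_has_error() logic, and the numbers are made up):
```
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct perf_counts_values. */
struct counts {
	uint64_t val, ena, run;
};

/* Interval mode shows the difference from the previous read. */
static struct counts counts_delta(struct counts cur, struct counts prev)
{
	struct counts d = {
		.val = cur.val - prev.val,
		.ena = cur.ena - prev.ena,
		.run = cur.run - prev.run,
	};
	return d;
}

/*
 * Rough stand-in for the evsel__count_has_error() idea: a count whose
 * enabled or running time is zero looks like an event that never ran.
 */
static bool count_has_error(struct counts c)
{
	return c.ena == 0 || c.run == 0;
}

int main(void)
{
	/* Two consecutive interval reads that computed identical ena/run. */
	struct counts prev = { .val = 2004264262, .ena = 5, .run = 5 };
	struct counts cur  = { .val = 3007381401, .ena = 5, .run = 5 };
	struct counts d = counts_delta(cur, prev);

	if (count_has_error(d))
		printf("%15s duration_time\n", "<not counted>");
	else
		printf("%15" PRIu64 " duration_time\n", d.val);
	return 0;
}
```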

The regression was introduced by a different fix, found through
bisection: commit 68cb1567439f ("perf tool_pmu: Fix aggregation on
duration_time"). The issue is that the enabled and running times of the
event match those of old_count, producing a delta of 0, which is treated
as indicating an error.
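To see where the zero delta comes from, here is a hedged reconstruction
of one of the offending patterns, the removed
"count->ena = count->run = *start_time + val" assignment for the
user/system time events: whenever the rusage mean is unchanged between
two interval reads, both reads compute the same enabled/running values
and the delta collapses to 0 (start_time and ru_mean below are made-up
values):
```
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative values: a fixed start time and an rusage mean that
	 * happens not to change between two interval reads. */
	uint64_t start_time = 123456789;
	uint64_t ru_mean = 1000000;	/* usecs, flat across reads */
	uint64_t prev_ena = 0, ena, delta;

	for (int read = 0; read < 2; read++) {
		/* Pre-patch logic: ena and run were both set to
		 * start_time + val on every read. */
		ena = start_time + ru_mean;
		delta = ena - prev_ena;
		printf("read %d: ena=%llu delta=%llu%s\n", read,
		       (unsigned long long)ena, (unsigned long long)delta,
		       read > 0 && delta == 0 ? " -> <not counted>" : "");
		prev_ena = ena;
	}
	return 0;
}
```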
Fixes: 68cb1567439f ("perf tool_pmu: Fix aggregation on duration_time")
Signed-off-by: Ian Rogers <irogers@...gle.com>
---
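Note on the new perf_counts__update() helper below: enabled/running only
need to stay non-zero per interval, they don't need to reflect real
scheduling time, so they simply advance by 1 over old_count on every
read; "raw" selects whether val is an absolute reading (the time events)
or accumulates onto old_count->val (the constant-style events). A small
sketch of the resulting per-interval arithmetic, with made-up readings
and the lost count omitted:
```
#include <stdint.h>
#include <stdio.h>

struct counts {
	uint64_t val, ena, run;
};

/* Same shape as perf_counts__update() in the patch (lost omitted). */
static void update(struct counts *cur, const struct counts *old,
		   int raw, uint64_t val)
{
	if (old) {
		cur->val = raw ? val : old->val + val;
		cur->ena = old->ena + 1;
		cur->run = old->run + 1;
	} else {
		cur->val = val;
		cur->ena++;
		cur->run++;
	}
}

int main(void)
{
	/* duration_time reports total elapsed ns (raw), num_cpus reports
	 * the same constant on every read (accumulating). */
	struct counts dur_prev = {0}, dur_cur = {0};
	struct counts cpus_prev = {0}, cpus_cur = {0};

	update(&dur_prev, NULL, /*raw=*/1, 1000000000);
	update(&dur_cur, &dur_prev, /*raw=*/1, 2003000000);

	update(&cpus_prev, NULL, /*raw=*/0, 8);
	update(&cpus_cur, &cpus_prev, /*raw=*/0, 8);

	/* What interval mode would display: the deltas are never dropped
	 * because ena/run always advance by 1. */
	printf("duration_time delta: %llu ns (ena delta %llu)\n",
	       (unsigned long long)(dur_cur.val - dur_prev.val),
	       (unsigned long long)(dur_cur.ena - dur_prev.ena));
	printf("num_cpus delta:      %llu (ena delta %llu)\n",
	       (unsigned long long)(cpus_cur.val - cpus_prev.val),
	       (unsigned long long)(cpus_cur.ena - cpus_prev.ena));
	return 0;
}
```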
tools/perf/builtin-stat.c | 16 +++++++++--
tools/perf/util/tool_pmu.c | 59 +++++++++++++++++++++-----------------
2 files changed, 46 insertions(+), 29 deletions(-)
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 886727ae8529..d89fa9468f89 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -282,17 +282,27 @@ static int read_single_counter(struct evsel *counter, int cpu_map_idx, int threa
if (err && cpu_map_idx == 0 &&
(evsel__tool_event(counter) == TOOL_PMU__EVENT_USER_TIME ||
evsel__tool_event(counter) == TOOL_PMU__EVENT_SYSTEM_TIME)) {
- u64 val, *start_time;
struct perf_counts_values *count =
perf_counts(counter->counts, cpu_map_idx, thread);
+ struct perf_counts_values *old_count = NULL;
+ u64 val;
+
+ if (counter->prev_raw_counts)
+ old_count = perf_counts(counter->prev_raw_counts, cpu_map_idx, thread);
- start_time = xyarray__entry(counter->start_times, cpu_map_idx, thread);
if (evsel__tool_event(counter) == TOOL_PMU__EVENT_USER_TIME)
val = ru_stats.ru_utime_usec_stat.mean;
else
val = ru_stats.ru_stime_usec_stat.mean;
- count->ena = count->run = *start_time + val;
+
count->val = val;
+ if (old_count) {
+ count->run = old_count->run + 1;
+ count->ena = old_count->ena + 1;
+ } else {
+ count->run++;
+ count->ena++;
+ }
return 0;
}
return err;
diff --git a/tools/perf/util/tool_pmu.c b/tools/perf/util/tool_pmu.c
index f075098488ba..b895e88ff740 100644
--- a/tools/perf/util/tool_pmu.c
+++ b/tools/perf/util/tool_pmu.c
@@ -431,16 +431,39 @@ bool tool_pmu__read_event(enum tool_pmu_event ev, struct evsel *evsel, u64 *resu
}
}
+static void perf_counts__update(struct perf_counts_values *count,
+ const struct perf_counts_values *old_count,
+ bool raw, u64 val)
+{
+ /*
+ * The values of enabled and running must make a ratio of 100%. The
+ * exact values don't matter as long as they are non-zero to avoid
+ * issues with evsel__count_has_error.
+ */
+ if (old_count) {
+ count->val = raw ? val : old_count->val + val;
+ count->run = old_count->run + 1;
+ count->ena = old_count->ena + 1;
+ count->lost = old_count->lost;
+ } else {
+ count->val = val;
+ count->run++;
+ count->ena++;
+ count->lost = 0;
+ }
+}
+
int evsel__tool_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread)
{
__u64 *start_time, cur_time, delta_start;
- u64 val;
- int fd, err = 0;
+ int err = 0;
struct perf_counts_values *count, *old_count = NULL;
bool adjust = false;
enum tool_pmu_event ev = evsel__tool_event(evsel);
count = perf_counts(evsel->counts, cpu_map_idx, thread);
+ if (evsel->prev_raw_counts)
+ old_count = perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
switch (ev) {
case TOOL_PMU__EVENT_HAS_PMEM:
@@ -451,26 +474,18 @@ int evsel__tool_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread)
case TOOL_PMU__EVENT_NUM_PACKAGES:
case TOOL_PMU__EVENT_SLOTS:
case TOOL_PMU__EVENT_SMT_ON:
- case TOOL_PMU__EVENT_SYSTEM_TSC_FREQ:
- if (evsel->prev_raw_counts)
- old_count = perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
- val = 0;
+ case TOOL_PMU__EVENT_SYSTEM_TSC_FREQ: {
+ u64 val = 0;
+
if (cpu_map_idx == 0 && thread == 0) {
if (!tool_pmu__read_event(ev, evsel, &val)) {
count->lost++;
val = 0;
}
}
- if (old_count) {
- count->val = old_count->val + val;
- count->run = old_count->run + 1;
- count->ena = old_count->ena + 1;
- } else {
- count->val = val;
- count->run++;
- count->ena++;
- }
+ perf_counts__update(count, old_count, /*raw=*/false, val);
return 0;
+ }
case TOOL_PMU__EVENT_DURATION_TIME:
/*
* Pretend duration_time is only on the first CPU and thread, or
@@ -486,9 +501,9 @@ int evsel__tool_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread)
case TOOL_PMU__EVENT_USER_TIME:
case TOOL_PMU__EVENT_SYSTEM_TIME: {
bool system = evsel__tool_event(evsel) == TOOL_PMU__EVENT_SYSTEM_TIME;
+ int fd = FD(evsel, cpu_map_idx, thread);
start_time = xyarray__entry(evsel->start_times, cpu_map_idx, thread);
- fd = FD(evsel, cpu_map_idx, thread);
lseek(fd, SEEK_SET, 0);
if (evsel->pid_stat) {
/* The event exists solely on 1 CPU. */
@@ -522,17 +537,9 @@ int evsel__tool_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread)
if (adjust) {
__u64 ticks_per_sec = sysconf(_SC_CLK_TCK);
- delta_start *= 1000000000 / ticks_per_sec;
+ delta_start *= 1e9 / ticks_per_sec;
}
- count->val = delta_start;
- count->lost = 0;
- /*
- * The values of enabled and running must make a ratio of 100%. The
- * exact values don't matter as long as they are non-zero to avoid
- * issues with evsel__count_has_error.
- */
- count->ena++;
- count->run++;
+ perf_counts__update(count, old_count, /*raw=*/true, delta_start);
return 0;
}
--
2.51.2.1006.ga50a493c49-goog