Message-ID: <20250825211204.2784695-3-irogers@google.com>
Date: Mon, 25 Aug 2025 14:12:03 -0700
From: Ian Rogers <irogers@...gle.com>
To: Peter Zijlstra <peterz@...radead.org>, Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>, Namhyung Kim <namhyung@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>, Jiri Olsa <jolsa@...nel.org>,
Ian Rogers <irogers@...gle.com>, Adrian Hunter <adrian.hunter@...el.com>,
Kan Liang <kan.liang@...ux.intel.com>, James Clark <james.clark@...aro.org>,
Greg Kroah-Hartman <gregkh@...uxfoundation.org>, Yoshihiro Furudera <fj5100bi@...itsu.com>,
Dapeng Mi <dapeng1.mi@...ux.intel.com>, Howard Chu <howardchu95@...il.com>,
Thomas Falcon <thomas.falcon@...el.com>, Andi Kleen <ak@...ux.intel.com>,
linux-perf-users@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [PATCH v1 2/3] perf stat: Don't skip failing group events
Pass errno to stat_handle_error() rather than reading errno after
intervening calls may have clobbered it. Move the "skippable" handling
first, as a skippable event (from the perf stat default list) should
always just be skipped.
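For illustration only (hypothetical helpers, not part of the patch),
the hazard looks like this: any library call between the failing
syscall and the handler can overwrite errno, so the handler must take
the saved value as an argument:

    #include <errno.h>
    #include <stdio.h>

    /* Bad: by the time errno is read here, the fprintf() above (or
     * any other intervening call) may already have overwritten it. */
    static void handle_error_racy(void)
    {
            fprintf(stderr, "event failed to open\n"); /* may clobber errno */
            fprintf(stderr, "cause: %d\n", errno);     /* possibly stale */
    }

    /* Good: snapshot errno right after the failing call and pass the
     * value down explicitly, as stat_handle_error() now does with 'err'. */
    static void handle_error_saved(int err)
    {
            fprintf(stderr, "event failed to open: %d\n", err);
    }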
Remove the logic that skipped, rather than failed, events in a group
when they weren't the group leader. That logic was added in commit
cb5ef60067c1 ("perf stat: Error out unsupported group leader
immediately") because, at the time, error handling and opening were
done together and an assertion could be raised. Not failing in this
case leaves groups broken and reporting no values, particularly for
topdown events.
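To make the removed behavior concrete, this is (simplified from the
hunks deleted below) the check in question: a failing event was
downgraded to COUNTER_SKIP whenever it wasn't the group leader or its
group had a single member, which left multi-event groups half opened:

    /* Skipping a failing non-leader member keeps the group alive but
     * broken, so its counters are never reported; failing instead
     * surfaces the error. */
    if (evsel__leader(counter) != counter ||
        counter->core.leader->nr_members <= 1)
            return COUNTER_SKIP;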
Reported-by: Dapeng Mi <dapeng1.mi@...ux.intel.com>
Closes: https://lore.kernel.org/lkml/20250822082233.1850417-1-dapeng1.mi@linux.intel.com/
Signed-off-by: Ian Rogers <irogers@...gle.com>
---
tools/perf/builtin-stat.c | 48 +++++++++++++++++----------------------
1 file changed, 21 insertions(+), 27 deletions(-)
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 2c38dd98f6ca..ab567919b89a 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -613,33 +613,40 @@ enum counter_recovery {
COUNTER_FATAL,
};
-static enum counter_recovery stat_handle_error(struct evsel *counter)
+static enum counter_recovery stat_handle_error(struct evsel *counter, int err)
{
char msg[BUFSIZ];
+
+ if (counter->skippable) {
+ if (verbose > 0) {
+ ui__warning("skipping event %s that kernel failed to open.\n",
+ evsel__name(counter));
+ }
+ counter->supported = false;
+ counter->errored = true;
+ return COUNTER_SKIP;
+ }
+
/*
* PPC returns ENXIO for HW counters until 2.6.37
* (behavior changed with commit b0a873e).
*/
- if (errno == EINVAL || errno == ENOSYS ||
- errno == ENOENT || errno == ENXIO) {
- if (verbose > 0)
+ if (err == EINVAL || err == ENOSYS || err == ENOENT || err == ENXIO) {
+ if (verbose > 0) {
ui__warning("%s event is not supported by the kernel.\n",
evsel__name(counter));
+ }
counter->supported = false;
/*
* errored is a sticky flag that means one of the counter's
* cpu event had a problem and needs to be reexamined.
*/
counter->errored = true;
-
- if ((evsel__leader(counter) != counter) ||
- !(counter->core.leader->nr_members > 1))
- return COUNTER_SKIP;
- } else if (evsel__fallback(counter, &target, errno, msg, sizeof(msg))) {
+ } else if (evsel__fallback(counter, &target, err, msg, sizeof(msg))) {
if (verbose > 0)
ui__warning("%s\n", msg);
return COUNTER_RETRY;
- } else if (target__has_per_thread(&target) && errno != EOPNOTSUPP &&
+ } else if (target__has_per_thread(&target) && err != EOPNOTSUPP &&
evsel_list->core.threads &&
evsel_list->core.threads->err_thread != -1) {
/*
@@ -651,29 +658,16 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
evsel_list->core.threads->err_thread = -1;
return COUNTER_RETRY;
}
- } else if (counter->skippable) {
- if (verbose > 0)
- ui__warning("skipping event %s that kernel failed to open .\n",
- evsel__name(counter));
- counter->supported = false;
- counter->errored = true;
- return COUNTER_SKIP;
- }
-
- if (errno == EOPNOTSUPP) {
+ } else if (err == EOPNOTSUPP) {
if (verbose > 0) {
ui__warning("%s event is not supported by the kernel.\n",
evsel__name(counter));
}
counter->supported = false;
counter->errored = true;
-
- if ((evsel__leader(counter) != counter) ||
- !(counter->core.leader->nr_members > 1))
- return COUNTER_SKIP;
}
- evsel__open_strerror(counter, &target, errno, msg, sizeof(msg));
+ evsel__open_strerror(counter, &target, err, msg, sizeof(msg));
ui__error("%s\n", msg);
if (child_pid != -1)
@@ -761,7 +755,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
continue;
}
- switch (stat_handle_error(counter)) {
+ switch (stat_handle_error(counter, errno)) {
case COUNTER_FATAL:
err = -1;
goto err_out;
@@ -803,7 +797,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
if (create_perf_stat_counter(counter, &stat_config, &target,
evlist_cpu_itr.cpu_map_idx) < 0) {
- switch (stat_handle_error(counter)) {
+ switch (stat_handle_error(counter, errno)) {
case COUNTER_FATAL:
err = -1;
goto err_out;
--
2.51.0.261.g7ce5a0a67e-goog