Message-ID: <20251002200604.1792141-1-irogers@google.com>
Date: Thu, 2 Oct 2025 13:06:03 -0700
From: Ian Rogers <irogers@...gle.com>
To: Peter Zijlstra <peterz@...radead.org>, Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>, Namhyung Kim <namhyung@...nel.org>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>, Jiri Olsa <jolsa@...nel.org>,
Ian Rogers <irogers@...gle.com>, Adrian Hunter <adrian.hunter@...el.com>,
Kan Liang <kan.liang@...ux.intel.com>, James Clark <james.clark@...aro.org>,
Howard Chu <howardchu95@...il.com>, Thomas Falcon <thomas.falcon@...el.com>,
Chun-Tse Shao <ctshao@...gle.com>, Dapeng Mi <dapeng1.mi@...ux.intel.com>,
linux-perf-users@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: [PATCH v1 1/2] perf stat: Move create_perf_stat_counter to builtin-stat

The function create_perf_stat_counter() is only used in
builtin-stat.c and contains event retry logic that is specific to
builtin-stat.c. Move the code to builtin-stat.c to tidy this up.

Signed-off-by: Ian Rogers <irogers@...gle.com>
---
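Since the helper now lives next to its only caller it can stay static
and pick up builtin-stat.c's file-scope 'target' directly, so the
'struct target *' parameter goes away. A minimal standalone sketch of
that shape, using made-up stand-ins (fake_config, fake_target,
create_counter) rather than the real perf types:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for perf_stat_config, target and the open. */
struct fake_config { bool no_inherit; };
struct fake_target { bool enable_on_exec; };

/* File-scope target, like the 'target' global in builtin-stat.c. */
static struct fake_target target = { .enable_on_exec = true };

/* File-local helper: takes only the config and an index. */
static int create_counter(const struct fake_config *config, int cpu_map_idx)
{
	printf("open: cpu_map_idx=%d inherit=%d enable_on_exec=%d\n",
	       cpu_map_idx, !config->no_inherit, target.enable_on_exec);
	return 0; /* pretend the open succeeded */
}

int main(void)
{
	struct fake_config config = { .no_inherit = false };

	/* Call-site shape after the move: no '&target' argument. */
	return create_counter(&config, 0) < 0 ? 1 : 0;
}
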
 tools/perf/builtin-stat.c | 60 +++++++++++++++++++++++++++++++++++++--
 tools/perf/util/stat.c    | 56 ------------------------------------
 tools/perf/util/stat.h    |  4 ---
 3 files changed, 58 insertions(+), 62 deletions(-)
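
For reference, the read_format setup that moves along with the
function: when the leader has more than one member, PERF_FORMAT_GROUP
plus PERF_FORMAT_ID make a single read() on the leader return every
member's value and ID alongside the enabled/running times. A small
self-contained sketch of that buffer layout, following the description
in perf_event_open(2); the struct names and sample numbers here are
illustrative only:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Shape of the buffer read() fills in for a group leader opened with
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING |
 * PERF_FORMAT_ID | PERF_FORMAT_GROUP, per perf_event_open(2).
 */
struct group_value {
	uint64_t value;		/* counter value */
	uint64_t id;		/* PERF_FORMAT_ID */
};

struct group_read_format {
	uint64_t nr;		/* number of events in the group */
	uint64_t time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
	uint64_t time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
	struct group_value values[];
};

int main(void)
{
	/* Fake raw buffer standing in for the result of a read(). */
	uint64_t raw[] = { 2, 1000000, 900000, 12345, 11, 67890, 12 };
	struct group_read_format *rf = (struct group_read_format *)raw;

	for (uint64_t i = 0; i < rf->nr; i++)
		printf("id %" PRIu64 ": value %" PRIu64
		       " (enabled %" PRIu64 "ns, running %" PRIu64 "ns)\n",
		       rf->values[i].id, rf->values[i].value,
		       rf->time_enabled, rf->time_running);
	return 0;
}
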
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index ab567919b89a..75b9979c6c05 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -676,6 +676,62 @@ static enum counter_recovery stat_handle_error(struct evsel *counter, int err)
 	return COUNTER_FATAL;
 }
 
+static int create_perf_stat_counter(struct evsel *evsel,
+				     struct perf_stat_config *config,
+				     int cpu_map_idx)
+{
+	struct perf_event_attr *attr = &evsel->core.attr;
+	struct evsel *leader = evsel__leader(evsel);
+
+	/* Reset supported flag as creating a stat counter is retried. */
+	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+			    PERF_FORMAT_TOTAL_TIME_RUNNING;
+
+	/*
+	 * The event is part of non trivial group, let's enable
+	 * the group read (for leader) and ID retrieval for all
+	 * members.
+	 */
+	if (leader->core.nr_members > 1)
+		attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
+
+	attr->inherit = !config->no_inherit && list_empty(&evsel->bpf_counter_list);
+
+	/*
+	 * Some events get initialized with sample_(period/type) set,
+	 * like tracepoints. Clear it up for counting.
+	 */
+	attr->sample_period = 0;
+
+	if (config->identifier)
+		attr->sample_type = PERF_SAMPLE_IDENTIFIER;
+
+	if (config->all_user) {
+		attr->exclude_kernel = 1;
+		attr->exclude_user = 0;
+	}
+
+	if (config->all_kernel) {
+		attr->exclude_kernel = 0;
+		attr->exclude_user = 1;
+	}
+
+	/*
+	 * Disabling all counters initially, they will be enabled
+	 * either manually by us or by kernel via enable_on_exec
+	 * set later.
+	 */
+	if (evsel__is_group_leader(evsel)) {
+		attr->disabled = 1;
+
+		if (target__enable_on_exec(&target))
+			attr->enable_on_exec = 1;
+	}
+
+	return evsel__open_per_cpu_and_thread(evsel, evsel__cpus(evsel), cpu_map_idx,
+					      evsel->core.threads);
+}
+
 static int __run_perf_stat(int argc, const char **argv, int run_idx)
 {
 	int interval = stat_config.interval;
@@ -736,7 +792,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 		if (evsel__is_bperf(counter))
 			continue;
 try_again:
-		if (create_perf_stat_counter(counter, &stat_config, &target,
+		if (create_perf_stat_counter(counter, &stat_config,
 					     evlist_cpu_itr.cpu_map_idx) < 0) {
 			/*
@@ -794,7 +850,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 			continue;
 try_again_reset:
 		pr_debug2("reopening weak %s\n", evsel__name(counter));
-		if (create_perf_stat_counter(counter, &stat_config, &target,
+		if (create_perf_stat_counter(counter, &stat_config,
 					     evlist_cpu_itr.cpu_map_idx) < 0) {
 			switch (stat_handle_error(counter, errno)) {
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 50b1a92d16df..101ed6c497bc 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -716,59 +716,3 @@ size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
 	return ret;
 }
-
-int create_perf_stat_counter(struct evsel *evsel,
-			     struct perf_stat_config *config,
-			     struct target *target,
-			     int cpu_map_idx)
-{
-	struct perf_event_attr *attr = &evsel->core.attr;
-	struct evsel *leader = evsel__leader(evsel);
-
-	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
-			    PERF_FORMAT_TOTAL_TIME_RUNNING;
-
-	/*
-	 * The event is part of non trivial group, let's enable
-	 * the group read (for leader) and ID retrieval for all
-	 * members.
-	 */
-	if (leader->core.nr_members > 1)
-		attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
-
-	attr->inherit = !config->no_inherit && list_empty(&evsel->bpf_counter_list);
-
-	/*
-	 * Some events get initialized with sample_(period/type) set,
-	 * like tracepoints. Clear it up for counting.
-	 */
-	attr->sample_period = 0;
-
-	if (config->identifier)
-		attr->sample_type = PERF_SAMPLE_IDENTIFIER;
-
-	if (config->all_user) {
-		attr->exclude_kernel = 1;
-		attr->exclude_user = 0;
-	}
-
-	if (config->all_kernel) {
-		attr->exclude_kernel = 0;
-		attr->exclude_user = 1;
-	}
-
-	/*
-	 * Disabling all counters initially, they will be enabled
-	 * either manually by us or by kernel via enable_on_exec
-	 * set later.
-	 */
-	if (evsel__is_group_leader(evsel)) {
-		attr->disabled = 1;
-
-		if (target__enable_on_exec(target))
-			attr->enable_on_exec = 1;
-	}
-
-	return evsel__open_per_cpu_and_thread(evsel, evsel__cpus(evsel), cpu_map_idx,
-					      evsel->core.threads);
-}
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index 4b0f14ae4e5f..34f30a295f89 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -223,10 +223,6 @@ size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp);
-int create_perf_stat_counter(struct evsel *evsel,
-			     struct perf_stat_config *config,
-			     struct target *target,
-			     int cpu_map_idx);
 void evlist__print_counters(struct evlist *evlist, struct perf_stat_config *config,
 			    struct target *_target, struct timespec *ts, int argc, const char **argv);
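
A side note on the "Disabling all counters initially" comment that
moves with the function: for a forked workload the tool leaves the
group leader disabled and sets enable_on_exec, so the kernel starts
counting exactly when the child execs. A rough standalone sketch of
that pattern against the raw perf_event_open(2) syscall (error
handling trimmed, not code taken from the perf tool):

#define _GNU_SOURCE
#include <inttypes.h>
#include <linux/perf_event.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static long sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
				int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(int argc, char **argv)
{
	struct perf_event_attr attr;
	uint64_t count = 0;
	pid_t child;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <command> [args...]\n", argv[0]);
		return 1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;		/* start disabled ...              */
	attr.enable_on_exec = 1;	/* ... kernel enables it at exec() */
	attr.exclude_kernel = 1;	/* roughly like --all-user         */

	child = fork();
	if (child == 0) {
		raise(SIGSTOP);		/* wait until the counter is opened */
		execvp(argv[1], &argv[1]);
		_exit(127);
	}

	waitpid(child, NULL, WUNTRACED);	/* child stopped at SIGSTOP */
	fd = (int)sys_perf_event_open(&attr, child, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	kill(child, SIGCONT);	/* child resumes, execs, counting starts */
	waitpid(child, NULL, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions: %" PRIu64 "\n", count);
	return 0;
}
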
--
2.51.0.618.g983fd99d29-goog