Message-Id: <20180830063252.23729-9-jolsa@kernel.org>
Date: Thu, 30 Aug 2018 08:32:17 +0200
From: Jiri Olsa <jolsa@...nel.org>
To: Arnaldo Carvalho de Melo <acme@...nel.org>
Cc: lkml <linux-kernel@...r.kernel.org>,
Ingo Molnar <mingo@...nel.org>,
Namhyung Kim <namhyung@...nel.org>,
David Ahern <dsahern@...il.com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Peter Zijlstra <a.p.zijlstra@...llo.nl>,
Andi Kleen <andi@...stfloor.org>
Subject: [PATCH 08/43] perf stat: Move create_perf_stat_counter into stat.c

Moving create_perf_stat_counter() to the stat object, so it can be
used globally. The 'target' variable stays local to builtin-stat.c,
so it is now passed in as an argument.
Link: http://lkml.kernel.org/n/tip-47odtb7otnqlo07h0crk6ftp@git.kernel.org
Signed-off-by: Jiri Olsa <jolsa@...nel.org>
---
tools/perf/builtin-stat.c | 54 +--------------------------------------
tools/perf/util/stat.c | 53 ++++++++++++++++++++++++++++++++++++++
tools/perf/util/stat.h | 4 +++
3 files changed, 58 insertions(+), 53 deletions(-)
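
Not part of the patch, just a sketch for context: with the function now
exported through util/stat.h, another object could open its counters the
same way builtin-stat.c does. The open_counters() wrapper, the include
paths and the evlist/config/target arguments below are hypothetical and
only illustrate the intended call site:

#include <errno.h>

#include "util/evlist.h"
#include "util/stat.h"
#include "util/target.h"

/* Open a counting (non-sampling) fd for every event in the list. */
static int open_counters(struct perf_evlist *evlist,
			 struct perf_stat_config *config,
			 struct target *target)
{
	struct perf_evsel *counter;

	evlist__for_each_entry(evlist, counter) {
		if (create_perf_stat_counter(counter, config, target) < 0)
			return -errno;
	}

	return 0;
}
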
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 45bbd156d496..142cff8eb12b 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -234,58 +234,6 @@ static void perf_stat__reset_stats(void)
perf_stat__reset_shadow_per_stat(&stat_config.stats[i]);
}
-static int create_perf_stat_counter(struct perf_evsel *evsel,
- struct perf_stat_config *config)
-{
- struct perf_event_attr *attr = &evsel->attr;
- struct perf_evsel *leader = evsel->leader;
-
- if (config->scale) {
- attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
- PERF_FORMAT_TOTAL_TIME_RUNNING;
- }
-
- /*
- * The event is part of non trivial group, let's enable
- * the group read (for leader) and ID retrieval for all
- * members.
- */
- if (leader->nr_members > 1)
- attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
-
- attr->inherit = !config->no_inherit;
-
- /*
- * Some events get initialized with sample_(period/type) set,
- * like tracepoints. Clear it up for counting.
- */
- attr->sample_period = 0;
-
- if (config->identifier)
- attr->sample_type = PERF_SAMPLE_IDENTIFIER;
-
- /*
- * Disabling all counters initially, they will be enabled
- * either manually by us or by kernel via enable_on_exec
- * set later.
- */
- if (perf_evsel__is_group_leader(evsel)) {
- attr->disabled = 1;
-
- /*
- * In case of initial_delay we enable tracee
- * events manually.
- */
- if (target__none(&target) && !config->initial_delay)
- attr->enable_on_exec = 1;
- }
-
- if (target__has_cpu(&target) && !target__has_per_thread(&target))
- return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
-
- return perf_evsel__open_per_thread(evsel, evsel->threads);
-}
-
static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
@@ -568,7 +516,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
evlist__for_each_entry(evsel_list, counter) {
try_again:
- if (create_perf_stat_counter(counter, &stat_config) < 0) {
+ if (create_perf_stat_counter(counter, &stat_config, &target) < 0) {
/* Weak group failed. Reset the group. */
if ((errno == EINVAL || errno == EBADF) &&
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index a0061e0b0fad..3bd24255376a 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -435,3 +435,56 @@ size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
return ret;
}
+
+int create_perf_stat_counter(struct perf_evsel *evsel,
+ struct perf_stat_config *config,
+ struct target *target)
+{
+ struct perf_event_attr *attr = &evsel->attr;
+ struct perf_evsel *leader = evsel->leader;
+
+ if (config->scale) {
+ attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+ PERF_FORMAT_TOTAL_TIME_RUNNING;
+ }
+
+ /*
+ * The event is part of non trivial group, let's enable
+ * the group read (for leader) and ID retrieval for all
+ * members.
+ */
+ if (leader->nr_members > 1)
+ attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
+
+ attr->inherit = !config->no_inherit;
+
+ /*
+ * Some events get initialized with sample_(period/type) set,
+ * like tracepoints. Clear it up for counting.
+ */
+ attr->sample_period = 0;
+
+ if (config->identifier)
+ attr->sample_type = PERF_SAMPLE_IDENTIFIER;
+
+ /*
+ * Disabling all counters initially, they will be enabled
+ * either manually by us or by kernel via enable_on_exec
+ * set later.
+ */
+ if (perf_evsel__is_group_leader(evsel)) {
+ attr->disabled = 1;
+
+ /*
+ * In case of initial_delay we enable tracee
+ * events manually.
+ */
+ if (target__none(target) && !config->initial_delay)
+ attr->enable_on_exec = 1;
+ }
+
+ if (target__has_cpu(target) && !target__has_per_thread(target))
+ return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
+
+ return perf_evsel__open_per_thread(evsel, evsel->threads);
+}
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index 918cde064cdc..8fb596641545 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -175,4 +175,8 @@ int perf_event__process_stat_event(struct perf_tool *tool,
size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp);
+
+int create_perf_stat_counter(struct perf_evsel *evsel,
+ struct perf_stat_config *config,
+ struct target *target);
#endif
--
2.17.1