lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <640baace-99c7-435e-a754-a46a49c32deb@linaro.org>
Date: Fri, 3 Oct 2025 12:31:11 +0100
From: James Clark <james.clark@...aro.org>
To: Ian Rogers <irogers@...gle.com>
Cc: Peter Zijlstra <peterz@...radead.org>, Ingo Molnar <mingo@...hat.com>,
 Arnaldo Carvalho de Melo <acme@...nel.org>,
 Namhyung Kim <namhyung@...nel.org>,
 Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
 Jiri Olsa <jolsa@...nel.org>, Adrian Hunter <adrian.hunter@...el.com>,
 Kan Liang <kan.liang@...ux.intel.com>, Howard Chu <howardchu95@...il.com>,
 Thomas Falcon <thomas.falcon@...el.com>, Chun-Tse Shao <ctshao@...gle.com>,
 Dapeng Mi <dapeng1.mi@...ux.intel.com>, linux-perf-users@...r.kernel.org,
 linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2 1/2] perf stat: Move create_perf_stat_counter to
 builtin-stat



On 02/10/2025 11:07 pm, Ian Rogers wrote:
> The function create_perf_stat_counter is only used in builtin-stat.c
> and contains logic about retrying events specific to
> builtin-stat.c. Move the code to builtin-stat to tidy this up.
> 
> Signed-off-by: Ian Rogers <irogers@...gle.com>

Reviewed-by: James Clark <james.clark@...aro.org>

> ---
>   tools/perf/builtin-stat.c | 60 +++++++++++++++++++++++++++++++++++++--
>   tools/perf/util/stat.c    | 56 ------------------------------------
>   tools/perf/util/stat.h    |  4 ---
>   3 files changed, 58 insertions(+), 62 deletions(-)
> 
> diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
> index ab567919b89a..75b9979c6c05 100644
> --- a/tools/perf/builtin-stat.c
> +++ b/tools/perf/builtin-stat.c
> @@ -676,6 +676,62 @@ static enum counter_recovery stat_handle_error(struct evsel *counter, int err)
>   	return COUNTER_FATAL;
>   }
>   
> +static int create_perf_stat_counter(struct evsel *evsel,
> +				    struct perf_stat_config *config,
> +				    int cpu_map_idx)
> +{
> +	struct perf_event_attr *attr = &evsel->core.attr;
> +	struct evsel *leader = evsel__leader(evsel);
> +
> +	/* Reset the read format, as creating a stat counter may be retried. */
> +	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
> +			    PERF_FORMAT_TOTAL_TIME_RUNNING;
> +
> +	/*
> +	 * The event is part of non trivial group, let's enable
> +	 * the group read (for leader) and ID retrieval for all
> +	 * members.
> +	 */
> +	if (leader->core.nr_members > 1)
> +		attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
> +
> +	attr->inherit = !config->no_inherit && list_empty(&evsel->bpf_counter_list);
> +
> +	/*
> +	 * Some events get initialized with sample_(period/type) set,
> +	 * like tracepoints. Clear it up for counting.
> +	 */
> +	attr->sample_period = 0;
> +
> +	if (config->identifier)
> +		attr->sample_type = PERF_SAMPLE_IDENTIFIER;
> +
> +	if (config->all_user) {
> +		attr->exclude_kernel = 1;
> +		attr->exclude_user   = 0;
> +	}
> +
> +	if (config->all_kernel) {
> +		attr->exclude_kernel = 0;
> +		attr->exclude_user   = 1;
> +	}
> +
> +	/*
> +	 * Disabling all counters initially, they will be enabled
> +	 * either manually by us or by kernel via enable_on_exec
> +	 * set later.
> +	 */
> +	if (evsel__is_group_leader(evsel)) {
> +		attr->disabled = 1;
> +
> +		if (target__enable_on_exec(&target))
> +			attr->enable_on_exec = 1;
> +	}
> +
> +	return evsel__open_per_cpu_and_thread(evsel, evsel__cpus(evsel), cpu_map_idx,
> +					      evsel->core.threads);
> +}
> +
>   static int __run_perf_stat(int argc, const char **argv, int run_idx)
>   {
>   	int interval = stat_config.interval;
> @@ -736,7 +792,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
>   		if (evsel__is_bperf(counter))
>   			continue;
>   try_again:
> -		if (create_perf_stat_counter(counter, &stat_config, &target,
> +		if (create_perf_stat_counter(counter, &stat_config,
>   					     evlist_cpu_itr.cpu_map_idx) < 0) {
>   
>   			/*
> @@ -794,7 +850,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
>   				continue;
>   try_again_reset:
>   			pr_debug2("reopening weak %s\n", evsel__name(counter));
> -			if (create_perf_stat_counter(counter, &stat_config, &target,
> +			if (create_perf_stat_counter(counter, &stat_config,
>   						     evlist_cpu_itr.cpu_map_idx) < 0) {
>   
>   				switch (stat_handle_error(counter, errno)) {
> diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
> index 50b1a92d16df..101ed6c497bc 100644
> --- a/tools/perf/util/stat.c
> +++ b/tools/perf/util/stat.c
> @@ -716,59 +716,3 @@ size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
>   
>   	return ret;
>   }
> -
> -int create_perf_stat_counter(struct evsel *evsel,
> -			     struct perf_stat_config *config,
> -			     struct target *target,
> -			     int cpu_map_idx)
> -{
> -	struct perf_event_attr *attr = &evsel->core.attr;
> -	struct evsel *leader = evsel__leader(evsel);
> -
> -	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
> -			    PERF_FORMAT_TOTAL_TIME_RUNNING;
> -
> -	/*
> -	 * The event is part of non trivial group, let's enable
> -	 * the group read (for leader) and ID retrieval for all
> -	 * members.
> -	 */
> -	if (leader->core.nr_members > 1)
> -		attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
> -
> -	attr->inherit = !config->no_inherit && list_empty(&evsel->bpf_counter_list);
> -
> -	/*
> -	 * Some events get initialized with sample_(period/type) set,
> -	 * like tracepoints. Clear it up for counting.
> -	 */
> -	attr->sample_period = 0;
> -
> -	if (config->identifier)
> -		attr->sample_type = PERF_SAMPLE_IDENTIFIER;
> -
> -	if (config->all_user) {
> -		attr->exclude_kernel = 1;
> -		attr->exclude_user   = 0;
> -	}
> -
> -	if (config->all_kernel) {
> -		attr->exclude_kernel = 0;
> -		attr->exclude_user   = 1;
> -	}
> -
> -	/*
> -	 * Disabling all counters initially, they will be enabled
> -	 * either manually by us or by kernel via enable_on_exec
> -	 * set later.
> -	 */
> -	if (evsel__is_group_leader(evsel)) {
> -		attr->disabled = 1;
> -
> -		if (target__enable_on_exec(target))
> -			attr->enable_on_exec = 1;
> -	}
> -
> -	return evsel__open_per_cpu_and_thread(evsel, evsel__cpus(evsel), cpu_map_idx,
> -					      evsel->core.threads);
> -}
> diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
> index 4b0f14ae4e5f..34f30a295f89 100644
> --- a/tools/perf/util/stat.h
> +++ b/tools/perf/util/stat.h
> @@ -223,10 +223,6 @@ size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp);
>   size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp);
>   size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp);
>   
> -int create_perf_stat_counter(struct evsel *evsel,
> -			     struct perf_stat_config *config,
> -			     struct target *target,
> -			     int cpu_map_idx);
>   void evlist__print_counters(struct evlist *evlist, struct perf_stat_config *config,
>   			    struct target *_target, struct timespec *ts, int argc, const char **argv);
>   


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ