Message-ID: <52b13a04-2d14-5011-01c9-0989aa47d40d@linux.intel.com>
Date: Tue, 26 Sep 2023 10:50:21 -0400
From: "Liang, Kan" <kan.liang@...ux.intel.com>
To: weilin.wang@...el.com, Ian Rogers <irogers@...gle.com>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...nel.org>,
Namhyung Kim <namhyung@...nel.org>,
Adrian Hunter <adrian.hunter@...el.com>
Cc: linux-perf-users@...r.kernel.org, linux-kernel@...r.kernel.org,
Perry Taylor <perry.taylor@...el.com>,
Samantha Alt <samantha.alt@...el.com>,
Caleb Biggers <caleb.biggers@...el.com>,
Mark Rutland <mark.rutland@....com>
Subject: Re: [RFC PATCH 01/25] perf stat: Add hardware-grouping cmd option to
perf stat
On 2023-09-25 2:18 a.m., weilin.wang@...el.com wrote:
> From: Weilin Wang <weilin.wang@...el.com>
>
> Hardware counter and event information could be used to help create event
> groups that make better use of hardware counters and improve multiplexing.
>
> Add a command-line option to allow the user to choose this new metric
> grouping method.
>
It's better to mention that this option is only available with -M.
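It may even be worth rejecting the option when no metric is requested. A
rough sketch of such a check in cmd_stat(), after option parsing (untested;
the error wording is just an example):

	/* --hardware-grouping only makes sense together with -M/--metrics */
	if (stat_config.hardware_aware_grouping && !metrics) {
		pr_err("--hardware-grouping requires -M/--metrics\n");
		goto out;
	}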
> $ perf stat -M TopdownL1 -a --hardware-grouping
>
I think it should be called counter-aware grouping. "Hardware grouping"
sounds like the group is created by hardware.

Eventually we should make it the default and let the default grouping code
take the counter information into account.
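In other words, the flag would ideally go away and
metricgroup__parse_groups() would pick the counter-aware path on its own.
For now the patch only logs a pr_debug(); presumably a later patch in the
series dispatches to the new code, roughly like this
(hw_aware_parse_groups() is a hypothetical name, sketch only):

	/* in metricgroup__parse_groups(), instead of the pr_debug() */
	if (hardware_aware_grouping)
		return hw_aware_parse_groups(perf_evlist, pmu, str,
					     metric_no_group, metric_no_merge,
					     metric_no_threshold,
					     user_requested_cpu_list,
					     system_wide, metric_events);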
Thanks,
Kan
> Signed-off-by: Weilin Wang <weilin.wang@...el.com>
> ---
> tools/perf/builtin-stat.c | 7 +++++++
> tools/perf/util/metricgroup.c | 5 +++++
> tools/perf/util/metricgroup.h | 1 +
> tools/perf/util/stat.h | 1 +
> 4 files changed, 14 insertions(+)
>
> diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
> index 07b48f6df..1a7dbfff5 100644
> --- a/tools/perf/builtin-stat.c
> +++ b/tools/perf/builtin-stat.c
> @@ -1256,6 +1256,8 @@ static struct option stat_options[] = {
> "don't try to share events between metrics in a group"),
> OPT_BOOLEAN(0, "metric-no-threshold", &stat_config.metric_no_threshold,
> "don't try to share events between metrics in a group "),
> + OPT_BOOLEAN(0, "hardware-grouping", &stat_config.hardware_aware_grouping,
> + "Use hardware aware metric grouping method"),
> OPT_BOOLEAN(0, "topdown", &topdown_run,
> "measure top-down statistics"),
> OPT_UINTEGER(0, "td-level", &stat_config.topdown_level,
> @@ -2061,6 +2063,7 @@ static int add_default_attributes(void)
> stat_config.metric_no_threshold,
> stat_config.user_requested_cpu_list,
> stat_config.system_wide,
> + stat_config.hardware_aware_grouping,
> &stat_config.metric_events);
> }
>
> @@ -2094,6 +2097,7 @@ static int add_default_attributes(void)
> stat_config.metric_no_threshold,
> stat_config.user_requested_cpu_list,
> stat_config.system_wide,
> + stat_config.hardware_aware_grouping,
> &stat_config.metric_events);
> }
>
> @@ -2128,6 +2132,7 @@ static int add_default_attributes(void)
> /*metric_no_threshold=*/true,
> stat_config.user_requested_cpu_list,
> stat_config.system_wide,
> + stat_config.hardware_aware_grouping,
> &stat_config.metric_events) < 0)
> return -1;
> }
> @@ -2169,6 +2174,7 @@ static int add_default_attributes(void)
> /*metric_no_threshold=*/true,
> stat_config.user_requested_cpu_list,
> stat_config.system_wide,
> + stat_config.hardware_aware_grouping,
> &stat_config.metric_events) < 0)
> return -1;
>
> @@ -2702,6 +2708,7 @@ int cmd_stat(int argc, const char **argv)
> stat_config.metric_no_threshold,
> stat_config.user_requested_cpu_list,
> stat_config.system_wide,
> + stat_config.hardware_aware_grouping,
> &stat_config.metric_events);
> zfree(&metrics);
> }
> diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
> index 6231044a4..b08af6860 100644
> --- a/tools/perf/util/metricgroup.c
> +++ b/tools/perf/util/metricgroup.c
> @@ -1690,12 +1690,17 @@ int metricgroup__parse_groups(struct evlist *perf_evlist,
> bool metric_no_threshold,
> const char *user_requested_cpu_list,
> bool system_wide,
> + bool hardware_aware_grouping,
> struct rblist *metric_events)
> {
> const struct pmu_metrics_table *table = pmu_metrics_table__find();
>
> if (!table)
> return -EINVAL;
> + if (hardware_aware_grouping) {
> + pr_debug("Use hardware aware grouping instead of traditional metric grouping method\n");
> + }
> +
>
> return parse_groups(perf_evlist, pmu, str, metric_no_group, metric_no_merge,
> metric_no_threshold, user_requested_cpu_list, system_wide,
> diff --git a/tools/perf/util/metricgroup.h b/tools/perf/util/metricgroup.h
> index d5325c6ec..779f6ede1 100644
> --- a/tools/perf/util/metricgroup.h
> +++ b/tools/perf/util/metricgroup.h
> @@ -77,6 +77,7 @@ int metricgroup__parse_groups(struct evlist *perf_evlist,
> bool metric_no_threshold,
> const char *user_requested_cpu_list,
> bool system_wide,
> + bool hardware_aware_grouping,
> struct rblist *metric_events);
> int metricgroup__parse_groups_test(struct evlist *evlist,
> const struct pmu_metrics_table *table,
> diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
> index 325d0fad1..f97d6282b 100644
> --- a/tools/perf/util/stat.h
> +++ b/tools/perf/util/stat.h
> @@ -87,6 +87,7 @@ struct perf_stat_config {
> bool metric_no_group;
> bool metric_no_merge;
> bool metric_no_threshold;
> + bool hardware_aware_grouping;
> bool stop_read_counter;
> bool iostat_run;
> char *user_requested_cpu_list;