Message-Id: <20230925061824.3818631-21-weilin.wang@intel.com>
Date: Sun, 24 Sep 2023 23:18:19 -0700
From: weilin.wang@...el.com
To: weilin.wang@...el.com, Ian Rogers <irogers@...gle.com>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...nel.org>,
Namhyung Kim <namhyung@...nel.org>,
Adrian Hunter <adrian.hunter@...el.com>,
Kan Liang <kan.liang@...ux.intel.com>
Cc: linux-perf-users@...r.kernel.org, linux-kernel@...r.kernel.org,
Perry Taylor <perry.taylor@...el.com>,
Samantha Alt <samantha.alt@...el.com>,
Caleb Biggers <caleb.biggers@...el.com>,
Mark Rutland <mark.rutland@....com>
Subject: [RFC PATCH 20/25] perf stat: Code refactoring in hardware-grouping
From: Weilin Wang <weilin.wang@...el.com>
Decouple the step that generates the final grouping strings from the
build_grouping step, so that we can do single-metric grouping first and
then merge groups later if needed.
Signed-off-by: Weilin Wang <weilin.wang@...el.com>
---
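A rough sketch of the resulting call flow, for review only. This is not
the literal code: the helper names are taken from the hunks below, the
wrapper name hw_aware_parse_ids_flow() is made up for illustration, and
the evlist construction and parse-error handling are trimmed.

/*
 * Illustrative only: the grouping is built first, and the grouping
 * strings are generated in a separate step, so a later change can merge
 * single-metric groupings before the strings are emitted.
 */
static int hw_aware_parse_ids_flow(struct expr_parse_ctx *ids,
				   const char *modifier)
{
	LIST_HEAD(grouping);		/* per-PMU groups of assigned events */
	LIST_HEAD(grouping_str);	/* final grouping strings */
	int ret;

	/* Step 1: assign each event to a hardware-aware group. */
	ret = hw_aware_build_grouping(ids, &grouping);
	if (ret)
		goto out;

	/* Step 2: turn those groups into parseable event strings. */
	ret = hw_aware_metricgroup__build_event_string(&grouping_str,
						       modifier, &grouping);

	/* (parsing grouping_str into an evlist is elided here) */
out:
	metricgroup__free_group_list(&grouping);
	metricgroup__free_grouping_strs(&grouping_str);
	return ret;
}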
tools/perf/util/metricgroup.c | 49 +++++++++++++++++------------------
1 file changed, 24 insertions(+), 25 deletions(-)
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index c027c0351..3c569d838 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -1799,9 +1799,10 @@ static int find_and_set_counters(struct metricgroup__event_info *e,
{
int ret;
unsigned long find_bit = 0;
-
- if (e->taken_alone && current_group->taken_alone)
+ if (e->taken_alone && current_group->taken_alone) {
+	pr_debug("current group already has a taken-alone event\n");
return -ENOSPC;
+ }
if (e->free_counter)
return 0;
if (e->fixed_counter) {
@@ -1919,7 +1920,8 @@ static int assign_event_grouping(struct metricgroup__event_info *e,
list_for_each_entry(g, groups, nd) {
if (!strcasecmp(g->pmu_name, e->pmu_name)) {
- pr_debug("found group for event %s in pmu %s\n", e->name, g->pmu_name);
+ pr_debug("found group header for event %s in pmu %s\n",
+ e->name, g->pmu_name);
pmu_group_head = g;
break;
}
@@ -2046,26 +2048,22 @@ static int hw_aware_metricgroup__build_event_string(struct list_head *group_strs
*/
static int create_grouping(struct list_head *pmu_info_list,
struct list_head *event_info_list,
- struct list_head *groupings,
- const char *modifier)
+ struct list_head *grouping)
{
int ret = 0;
struct metricgroup__event_info *e;
- LIST_HEAD(groups);
char *bit_buf = malloc(NR_COUNTERS);
- //TODO: for each new core group, we could consider to add events that uses fixed counters
+	//TODO: for each new core group, we could consider adding events
+	//that use fixed counters
list_for_each_entry(e, event_info_list, nd) {
bitmap_scnprintf(e->counters, NR_COUNTERS, bit_buf, NR_COUNTERS);
pr_debug("Event name %s, [pmu]=%s, [counters]=%s, [taken_alone]=%d\n",
e->name, e->pmu_name, bit_buf, e->taken_alone);
- ret = assign_event_grouping(e, pmu_info_list, &groups);
+ ret = assign_event_grouping(e, pmu_info_list, grouping);
if (ret)
- goto out;
+ return ret;
}
- ret = hw_aware_metricgroup__build_event_string(groupings, modifier, &groups);
-out:
- metricgroup__free_group_list(&groups);
return ret;
};
@@ -2077,9 +2075,8 @@ static int create_grouping(struct list_head *pmu_info_list,
* @groupings: header to the list of final event grouping.
* @modifier: any modifiers added to the events.
*/
-static int hw_aware_build_grouping(struct expr_parse_ctx *ctx __maybe_unused,
- struct list_head *groupings __maybe_unused,
- const char *modifier __maybe_unused)
+static int hw_aware_build_grouping(struct expr_parse_ctx *ctx,
+ struct list_head *grouping)
{
int ret = 0;
struct hashmap_entry *cur;
@@ -2112,8 +2109,7 @@ static int hw_aware_build_grouping(struct expr_parse_ctx *ctx __maybe_unused,
ret = get_pmu_counter_layouts(&pmu_info_list, ltable);
if (ret)
goto err_out;
- ret = create_grouping(&pmu_info_list, &event_info_list, groupings,
- modifier);
+ ret = create_grouping(&pmu_info_list, &event_info_list, grouping);
err_out:
metricgroup__free_event_info(&event_info_list);
@@ -2159,23 +2155,25 @@ static int hw_aware_parse_ids(struct perf_pmu *fake_pmu,
{
struct parse_events_error parse_error;
struct evlist *parsed_evlist;
- LIST_HEAD(groupings);
+ LIST_HEAD(grouping_str);
+ LIST_HEAD(grouping);
struct metricgroup__group_strs *group;
int ret;
*out_evlist = NULL;
- ret = hw_aware_build_grouping(ids, &groupings, modifier);
- if (ret) {
- metricgroup__free_grouping_strs(&groupings);
- return ret;
- }
+ ret = hw_aware_build_grouping(ids, &grouping);
+ if (ret)
+ goto err_out;
+ ret = hw_aware_metricgroup__build_event_string(&grouping_str, modifier, &grouping);
+ if (ret)
+ goto err_out;
parsed_evlist = evlist__new();
if (!parsed_evlist) {
ret = -ENOMEM;
goto err_out;
}
- list_for_each_entry(group, &groupings, nd) {
+ list_for_each_entry(group, &grouping_str, nd) {
struct strbuf *events = &group->grouping_str;
pr_debug("Parsing metric events '%s'\n", events->buf);
@@ -2193,9 +2191,10 @@ static int hw_aware_parse_ids(struct perf_pmu *fake_pmu,
*out_evlist = parsed_evlist;
parsed_evlist = NULL;
err_out:
+ metricgroup__free_group_list(&grouping);
+ metricgroup__free_grouping_strs(&grouping_str);
parse_events_error__exit(&parse_error);
evlist__delete(parsed_evlist);
- metricgroup__free_grouping_strs(&groupings);
return ret;
}
--
2.39.3