[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20231014015202.1175377-16-weilin.wang@intel.com>
Date: Fri, 13 Oct 2023 18:52:00 -0700
From: weilin.wang@...el.com
To: weilin.wang@...el.com, Ian Rogers <irogers@...gle.com>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...nel.org>,
Namhyung Kim <namhyung@...nel.org>,
Adrian Hunter <adrian.hunter@...el.com>,
Kan Liang <kan.liang@...ux.intel.com>
Cc: linux-perf-users@...r.kernel.org, linux-kernel@...r.kernel.org,
Perry Taylor <perry.taylor@...el.com>,
Samantha Alt <samantha.alt@...el.com>,
Caleb Biggers <caleb.biggers@...el.com>,
Mark Rutland <mark.rutland@....com>,
Yang Jihong <yangjihong1@...wei.com>
Subject: [RFC PATCH v2 15/17] perf stat: Code refactoring in hardware-grouping
From: Weilin Wang <weilin.wang@...el.com>
Decouple the step that generates the final grouping strings from the
build_grouping step, so that we can do single-metric grouping first and
then merge groups later if needed.
Signed-off-by: Weilin Wang <weilin.wang@...el.com>
---
tools/perf/util/metricgroup.c | 50 +++++++++++++++++------------------
1 file changed, 25 insertions(+), 25 deletions(-)
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 7da91cbd3..1222cff8d 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -1801,9 +1801,10 @@ static int find_and_set_counters(struct metricgroup__event_info *e,
{
int ret;
unsigned long find_bit = 0;
-
- if (e->taken_alone && current_group->taken_alone)
+ if (e->taken_alone && current_group->taken_alone) {
+ pr_debug("current group with taken alone event already\n");
return -ENOSPC;
+ }
if (e->free_counter)
return 0;
if (e->fixed_counter) {
@@ -1922,7 +1923,8 @@ static int assign_event_grouping(struct metricgroup__event_info *e,
list_for_each_entry(g, groups, nd) {
if (!strcasecmp(g->pmu_name, e->pmu_name)) {
- pr_debug("found group for event %s in pmu %s\n", e->name, g->pmu_name);
+ pr_debug("found group header for event %s in pmu %s\n",
+ e->name, g->pmu_name);
pmu_group_head = g;
break;
}
@@ -2051,26 +2053,22 @@ static int hw_aware_metricgroup__build_event_string(struct list_head *group_strs
*/
static int create_grouping(struct list_head *pmu_info_list,
struct list_head *event_info_list,
- struct list_head *groupings,
- const char *modifier)
+ struct list_head *grouping)
{
int ret = 0;
struct metricgroup__event_info *e;
- LIST_HEAD(groups);
char *bit_buf = malloc(NR_COUNTERS);
- //TODO: for each new core group, we should consider to add events that uses fixed counters
+	//TODO: for each new core group, we could consider adding events that
+	//use fixed counters
list_for_each_entry(e, event_info_list, nd) {
bitmap_scnprintf(e->counters, NR_COUNTERS, bit_buf, NR_COUNTERS);
pr_debug("Event name %s, [pmu]=%s, [counters]=%s, [taken_alone]=%d\n",
e->name, e->pmu_name, bit_buf, e->taken_alone);
- ret = assign_event_grouping(e, pmu_info_list, &groups);
+ ret = assign_event_grouping(e, pmu_info_list, grouping);
if (ret)
- goto out;
+ return ret;
}
- ret = hw_aware_metricgroup__build_event_string(groupings, modifier, &groups);
-out:
- metricgroup__free_group_list(&groups);
return ret;
};
@@ -2091,9 +2089,8 @@ static bool is_special_event(const char *id)
* @groupings: header to the list of final event grouping.
* @modifier: any modifiers added to the events.
*/
-static int hw_aware_build_grouping(struct expr_parse_ctx *ctx __maybe_unused,
- struct list_head *groupings __maybe_unused,
- const char *modifier __maybe_unused)
+static int hw_aware_build_grouping(struct expr_parse_ctx *ctx,
+ struct list_head *grouping)
{
int ret = 0;
struct hashmap_entry *cur;
@@ -2125,8 +2122,7 @@ static int hw_aware_build_grouping(struct expr_parse_ctx *ctx __maybe_unused,
ret = get_pmu_counter_layouts(&pmu_info_list, ltable);
if (ret)
goto err_out;
- ret = create_grouping(&pmu_info_list, &event_info_list, groupings,
- modifier);
+ ret = create_grouping(&pmu_info_list, &event_info_list, grouping);
err_out:
metricgroup__free_event_info(&event_info_list);
@@ -2172,23 +2168,25 @@ static int hw_aware_parse_ids(struct perf_pmu *fake_pmu,
{
struct parse_events_error parse_error;
struct evlist *parsed_evlist;
- LIST_HEAD(groupings);
+ LIST_HEAD(grouping_str);
+ LIST_HEAD(grouping);
struct metricgroup__group_strs *group;
int ret;
*out_evlist = NULL;
- ret = hw_aware_build_grouping(ids, &groupings, modifier);
- if (ret) {
- metricgroup__free_grouping_strs(&groupings);
- return ret;
- }
+ ret = hw_aware_build_grouping(ids, &grouping);
+ if (ret)
+ goto out;
+ ret = hw_aware_metricgroup__build_event_string(&grouping_str, modifier, &grouping);
+ if (ret)
+ goto out;
parsed_evlist = evlist__new();
if (!parsed_evlist) {
ret = -ENOMEM;
goto err_out;
}
- list_for_each_entry(group, &groupings, nd) {
+ list_for_each_entry(group, &grouping_str, nd) {
struct strbuf *events = &group->grouping_str;
pr_debug("Parsing metric events '%s'\n", events->buf);
@@ -2208,7 +2206,9 @@ static int hw_aware_parse_ids(struct perf_pmu *fake_pmu,
err_out:
parse_events_error__exit(&parse_error);
evlist__delete(parsed_evlist);
- metricgroup__free_grouping_strs(&groupings);
+out:
+ metricgroup__free_group_list(&grouping);
+ metricgroup__free_grouping_strs(&grouping_str);
return ret;
}
--
2.39.3
Powered by blists - more mailing lists