Message-Id: <20230925061824.3818631-13-weilin.wang@intel.com>
Date: Sun, 24 Sep 2023 23:18:11 -0700
From: weilin.wang@...el.com
To: weilin.wang@...el.com, Ian Rogers <irogers@...gle.com>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...nel.org>,
Namhyung Kim <namhyung@...nel.org>,
Adrian Hunter <adrian.hunter@...el.com>,
Kan Liang <kan.liang@...ux.intel.com>
Cc: linux-perf-users@...r.kernel.org, linux-kernel@...r.kernel.org,
Perry Taylor <perry.taylor@...el.com>,
Samantha Alt <samantha.alt@...el.com>,
Caleb Biggers <caleb.biggers@...el.com>,
Mark Rutland <mark.rutland@....com>
Subject: [RFC PATCH 12/25] perf stat: Add more functions for hardware-grouping method
From: Weilin Wang <weilin.wang@...el.com>

Add a function to fill all bits of one counter bitmap. Also add a
function to create a new group and insert it into the group list when
no counter is available in any of the existing groups.

Signed-off-by: Weilin Wang <weilin.wang@...el.com>
---
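
Note (illustration only, not part of the patch): the grouping code
tracks counter availability per PMU with bitmaps in which a set bit
means the counter is still free. Below is a minimal user-space sketch
of that bookkeeping, assuming a placeholder NR_COUNTERS value and a
simplified set_counter_bitmap(); the real helper and constant are
introduced by earlier patches in this series:

	#include <errno.h>
	#include <stdio.h>

	#define NR_COUNTERS 16	/* assumed placeholder width for the sketch */

	/* Mark counter 'pos' as available by setting its bit. */
	static int set_counter_bitmap(int pos, unsigned long *bitmap)
	{
		if (pos < 0 || pos >= NR_COUNTERS)
			return -EINVAL;
		*bitmap |= 1UL << pos;
		return 0;
	}

	/* Like fill_counter_bitmap() below: clear the map, then mark
	 * 'size' consecutive counters starting at 'start' as available. */
	static int fill_counter_bitmap(unsigned long *bitmap, int start, int size)
	{
		*bitmap = 0;
		for (int pos = start; pos < start + size; pos++) {
			int ret = set_counter_bitmap(pos, bitmap);

			if (ret)
				return ret;
		}
		return 0;
	}

	int main(void)
	{
		unsigned long gp_counters = 0;

		/* e.g. 8 general-purpose counters, all initially free */
		if (fill_counter_bitmap(&gp_counters, 0, 8))
			return 1;
		printf("gp counter mask: %#lx\n", gp_counters); /* 0xff */
		return 0;
	}

In the series itself the bitmaps are sized from the PMU's reported
counter layout (pmu_group_head->size and ->fixed_size) rather than a
fixed constant.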
 tools/perf/util/metricgroup.c | 39 ++++++++++++++++++++++++++++++-----
 1 file changed, 34 insertions(+), 5 deletions(-)

diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 68d56087b..8d54e71bf 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -1702,6 +1702,19 @@ static int get_pmu_counter_layouts(struct list_head *pmu_info_list,
return ret;
}
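+/**
+ * fill_counter_bitmap - mark a range of counters as available
+ * @bitmap: counter bitmap to initialize; cleared first
+ * @start: first bit to set
+ * @size: number of consecutive bits to set
+ */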
+static int fill_counter_bitmap(unsigned long *bitmap, int start, int size)
+{
+	int ret;
+
+	bitmap_zero(bitmap, NR_COUNTERS);
+ for (int pos = start; pos < start + size; pos++) {
+ ret = set_counter_bitmap(pos, bitmap);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
/**
 * Find if there is a counter available for event e in current_group. If a
 * counter is available, use this counter by filling the bit in the correct counter
@@ -1750,6 +1763,21 @@ static int _insert_event(struct metricgroup__event_info *e,
return 0;
}
+/**
+ * Initialize the counter bitmaps of new_group so that all GP and fixed
+ * counters start out available, then insert the new_group node at the
+ * end of the group list.
+ */
+static int insert_new_group(struct list_head *head,
+ struct metricgroup__group *new_group,
+ size_t size,
+ size_t fixed_size)
+{
+	int ret;
+
+	INIT_LIST_HEAD(&new_group->event_head);
+	ret = fill_counter_bitmap(new_group->gp_counters, 0, size);
+	if (!ret)
+		ret = fill_counter_bitmap(new_group->fixed_counters, 0, fixed_size);
+	if (ret)
+		return ret;
+	list_add_tail(&new_group->nd, head);
+	return 0;
+}
+
/**
 * Insert event e into a group capable of including it
*
@@ -1759,7 +1787,7 @@ static int insert_event_to_group(struct metricgroup__event_info *e,
{
struct metricgroup__group *g;
int ret;
- //struct list_head *head;
+ struct list_head *head;
list_for_each_entry(g, &pmu_group_head->group_head, nd) {
ret = find_and_set_counters(e, g);
@@ -1774,13 +1802,14 @@ static int insert_event_to_group(struct metricgroup__event_info *e,
*/
{
struct metricgroup__group *current_group = malloc(sizeof(struct metricgroup__group));
+
if (!current_group)
return -ENOMEM;
pr_debug("create_new_group for [event] %s\n", e->name);
- //head = &pmu_group_head->group_head;
- //ret = create_new_group(head, current_group, pmu_group_head->size,
- // pmu_group_head->fixed_size);
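+	/* Append a fresh group with all of its counters free, then retry. */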
+ head = &pmu_group_head->group_head;
+ ret = insert_new_group(head, current_group, pmu_group_head->size,
+ pmu_group_head->fixed_size);
if (ret)
return ret;
ret = find_and_set_counters(e, current_group);
@@ -1817,7 +1846,7 @@ static int assign_event_grouping(struct metricgroup__event_info *e,
pmu_group_head = malloc(sizeof(struct metricgroup__pmu_group_list));
INIT_LIST_HEAD(&pmu_group_head->group_head);
- pr_debug("create new group for event %s in pmu %s ", e->name, e->pmu_name);
+ pr_debug("create new group for event %s in pmu %s\n", e->name, e->pmu_name);
pmu_group_head->pmu_name = e->pmu_name;
list_for_each_entry(p, pmu_info_list, nd) {
if (!strcasecmp(p->name, e->pmu_name)) {
--
2.39.3