[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CAP-5=fU-JoD20zLVBgjDfR8bDqdb_s+pJqXZqJdSdgFnsfbiTg@mail.gmail.com>
Date: Sat, 23 Mar 2024 22:00:49 -0700
From: Ian Rogers <irogers@...gle.com>
To: weilin.wang@...el.com
Cc: Kan Liang <kan.liang@...ux.intel.com>, Namhyung Kim <namhyung@...nel.org>,
Arnaldo Carvalho de Melo <acme@...nel.org>, Peter Zijlstra <peterz@...radead.org>, Ingo Molnar <mingo@...hat.com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>, Jiri Olsa <jolsa@...nel.org>,
Adrian Hunter <adrian.hunter@...el.com>, linux-perf-users@...r.kernel.org,
linux-kernel@...r.kernel.org, Perry Taylor <perry.taylor@...el.com>,
Samantha Alt <samantha.alt@...el.com>, Caleb Biggers <caleb.biggers@...el.com>,
Mark Rutland <mark.rutland@....com>
Subject: Re: [RFC PATCH v4 07/15] perf stat: Add functions to create new group
and assign events into groups
On Thu, Feb 8, 2024 at 7:14 PM <weilin.wang@...el.com> wrote:
>
> From: Weilin Wang <weilin.wang@...el.com>
>
> Add struct metricgroup__pmu_group_list to hold the lists of groups from
> different PMUs. Each PMU has one separate list.
>
> Add struct metricgroup__group as one node (one group in the grouping
> result) of the metricgroup__pmu_group_list. It uses two bitmaps to log
> counter availabilities (gp counters and fixed counters).
>
> Add functions to create group and assign event into the groups based on the
> event restrictions (struct metricgroup__event_info) and counter
> availability (pmu_info_list and bitmaps). New group is inserted into the
> list of groups.
>
> Add functions to handle counter bitmaps. Add functions to do find and insert
> operations to handle inserting events into groups.
>
> Add function to fill all bits of one counter bitmap. Add functions to
> create new groups when no counter is available in all the existing groups.
>
> Signed-off-by: Weilin Wang <weilin.wang@...el.com>
Reviewed-by: Ian Rogers <irogers@...gle.com>
Thanks,
Ian
> ---
> tools/perf/util/metricgroup.c | 296 ++++++++++++++++++++++++++++++++++
> 1 file changed, 296 insertions(+)
>
> diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
> index 9061ed4ca015..f86e9a0b0d65 100644
> --- a/tools/perf/util/metricgroup.c
> +++ b/tools/perf/util/metricgroup.c
> @@ -192,6 +192,41 @@ struct metricgroup__pmu_counters {
> size_t num_counters;
> size_t num_fixed_counters;
> };
> +/**
> + * A list of event groups for this pmu.
> + * This is updated during the grouping.
> + */
> +struct metricgroup__pmu_group_list {
> + struct list_head nd;
> + /** The name of the pmu(/core) the events collected on. */
> + const char *pmu_name;
> + /** The number of gp counters in the pmu(/core). */
> + size_t num_counters;
> + /** The number of fixed counters in the pmu(/core) if applicable. */
> + size_t num_fixed_counters;
> + /** Head to the list of groups using this pmu(/core)*/
> + struct list_head group_head;
> +};
> +/**
> + * This is one node in the metricgroup__pmu_group_list.
> + * It represents one group.
> + */
> +struct metricgroup__group {
> + struct list_head nd;
> + /** The bitmaps represent availability of the counters.
> + * They are updated once the corresponding counter is used by
> + * an event (event inserted into the group).
> + */
> + DECLARE_BITMAP(gp_counters, NR_COUNTERS);
> + DECLARE_BITMAP(fixed_counters, NR_COUNTERS);
> + /** Head to the list of event names in this group*/
> + struct list_head event_head;
> +};
> +
> +struct metricgroup__group_events {
> + struct list_head nd;
> + const char *event_name;
> +};
>
> /**
> * Each group is one node in the group string list.
> @@ -1487,6 +1522,34 @@ static int set_counter_bitmap(int pos, unsigned long *bitmap)
> return 0;
> }
>
> +/**
> + * Returns 0 on success. Finds the last counter that is not used in pmu_counters
> + * and supports the event, included in event_counters.
> + */
> +static int find_counter_bitmap(const unsigned long *pmu_counters,
> + const unsigned long *event_counters,
> + unsigned long *bit)
> +{
> + /*It is helpful to assign from the highest bit because some events can
> + *only be collected using GP0-3.
> + */
> + unsigned long find_bit = find_last_and_bit(pmu_counters, event_counters, NR_COUNTERS);
> +
> + if (find_bit == NR_COUNTERS)
> + return -ERANGE;
> + *bit = find_bit;
> + return 0;
> +}
> +
> +static int use_counter_bitmap(unsigned long *bitmap,
> + unsigned long find_bit)
> +{
> + if (find_bit >= NR_COUNTERS)
> + return -EINVAL;
> + __clear_bit(find_bit, bitmap);
> + return 0;
> +}
> +
> static int parse_fixed_counter(const char *counter,
> unsigned long *bitmap,
> bool *fixed)
> @@ -1544,6 +1607,38 @@ static int parse_counter(const char *counter,
> return 0;
> }
>
> +static void group_event_list_free(struct metricgroup__group *groups)
> +{
> + struct metricgroup__group_events *e, *tmp;
> +
> + list_for_each_entry_safe(e, tmp, &groups->event_head, nd) {
> + list_del_init(&e->nd);
> + free(e);
> + }
> +}
> +
> +static void group_list_free(struct metricgroup__pmu_group_list *groups)
> +{
> + struct metricgroup__group *g, *tmp;
> +
> + list_for_each_entry_safe(g, tmp, &groups->group_head, nd) {
> + list_del_init(&g->nd);
> + group_event_list_free(g);
> + free(g);
> + }
> +}
> +
> +static void metricgroup__free_group_list(struct list_head *groups)
> +{
> + struct metricgroup__pmu_group_list *g, *tmp;
> +
> + list_for_each_entry_safe(g, tmp, groups, nd) {
> + list_del_init(&g->nd);
> + group_list_free(g);
> + free(g);
> + }
> +}
> +
> static void metricgroup__free_event_info(struct list_head
> *event_info_list)
> {
> @@ -1719,6 +1814,203 @@ static int get_pmu_counter_layouts(struct list_head *pmu_info_list,
> return ret;
> }
>
> +static int fill_counter_bitmap(unsigned long *bitmap, int start, int size)
> +{
> + int ret;
> +
> + bitmap_zero(bitmap, NR_COUNTERS);
> +
> + for (int pos = start; pos < start + size; pos++) {
> + ret = set_counter_bitmap(pos, bitmap);
> + if (ret)
> + return ret;
> + }
> + return 0;
> +}
> +
> +/**
> + * Find if there is a counter available for event e in current_group. If a
> + * counter is available, use this counter by filling the bit in the correct
> + * counter bitmap. Otherwise, return error (-ERANGE).
> + */
> +static int find_and_set_counters(struct metricgroup__event_info *e,
> + struct metricgroup__group *current_group)
> +{
> + int ret;
> + unsigned long find_bit = 0;
> +
> + if (e->free_counter)
> + return 0;
> + if (e->fixed_counter) {
> + ret = find_counter_bitmap(current_group->fixed_counters, e->counters,
> + &find_bit);
> + if (ret)
> + return ret;
> + pr_debug("found counter for [event]=%s [e->fixed_counters]=%lu\n",
> + e->name, *current_group->fixed_counters);
> + ret = use_counter_bitmap(current_group->fixed_counters, find_bit);
> + } else {
> + ret = find_counter_bitmap(current_group->gp_counters, e->counters,
> + &find_bit);
> + if (ret)
> + return ret;
> + pr_debug("found counter for [event]=%s [e->gp_counters]=%lu\n",
> + e->name, *current_group->gp_counters);
> + ret = use_counter_bitmap(current_group->gp_counters, find_bit);
> + }
> + return ret;
> +}
> +
> +static int _insert_event(struct metricgroup__event_info *e,
> + struct metricgroup__group *group)
> +{
> + struct metricgroup__group_events *event = malloc(sizeof(struct metricgroup__group_events));
> +
> + if (!event)
> + return -ENOMEM;
> + event->event_name = e->name;
> + if (e->fixed_counter)
> + list_add(&event->nd, &group->event_head);
> + else
> + list_add_tail(&event->nd, &group->event_head);
> + return 0;
> +}
> +
> +/**
> + * Insert the new_group node at the end of the group list.
> + */
> +static int insert_new_group(struct list_head *head,
> + struct metricgroup__group *new_group,
> + size_t num_counters,
> + size_t num_fixed_counters)
> +{
> + INIT_LIST_HEAD(&new_group->event_head);
> + fill_counter_bitmap(new_group->gp_counters, 0, num_counters);
> + fill_counter_bitmap(new_group->fixed_counters, 0, num_fixed_counters);
> + list_add_tail(&new_group->nd, head);
> + return 0;
> +}
> +
> +/**
> + * Insert event e into a group capable to include it
> + *
> + */
> +static int insert_event_to_group(struct metricgroup__event_info *e,
> + struct metricgroup__pmu_group_list *pmu_group_head)
> +{
> + struct metricgroup__group *g;
> + int ret;
> + struct list_head *head;
> +
> + list_for_each_entry(g, &pmu_group_head->group_head, nd) {
> + ret = find_and_set_counters(e, g);
> + if (!ret) { /* return if successfully find and set counter*/
> + ret = _insert_event(e, g);
> + return ret;
> + }
> + }
> + /*
> + * We were not able to find an existing group to insert this event.
> + * Continue to create a new group and insert the event in it.
> + */
> + {
> + struct metricgroup__group *current_group =
> + malloc(sizeof(struct metricgroup__group));
> +
> + if (!current_group)
> + return -ENOMEM;
> + pr_debug("create_new_group for [event] %s\n", e->name);
> +
> + head = &pmu_group_head->group_head;
> + ret = insert_new_group(head, current_group, pmu_group_head->num_counters,
> + pmu_group_head->num_fixed_counters);
> + if (ret)
> + return ret;
> + ret = find_and_set_counters(e, current_group);
> + if (ret)
> + return ret;
> + ret = _insert_event(e, current_group);
> + }
> +
> + return ret;
> +}
> +
> +/**
> + * assign_event_grouping - Assign an event into a group. If existing group
> + * cannot include it, create a new group and insert the event to it.
> + */
> +static int assign_event_grouping(struct metricgroup__event_info *e,
> + struct list_head *pmu_info_list,
> + struct list_head *groups)
> +{
> + int ret = 0;
> +
> + struct metricgroup__pmu_group_list *g = NULL;
> + struct metricgroup__pmu_group_list *pmu_group_head = NULL;
> +
> + list_for_each_entry(g, groups, nd) {
> + if (!strcasecmp(g->pmu_name, e->pmu_name)) {
> + pr_debug("found group for event %s in pmu %s\n", e->name, g->pmu_name);
> + pmu_group_head = g;
> + break;
> + }
> + }
> + if (!pmu_group_head) {
> + struct metricgroup__pmu_counters *p;
> +
> + pmu_group_head = malloc(sizeof(struct metricgroup__pmu_group_list));
> + if (!pmu_group_head)
> + return -ENOMEM;
> + INIT_LIST_HEAD(&pmu_group_head->group_head);
> + pr_debug("create new group for event %s in pmu %s\n", e->name, e->pmu_name);
> + pmu_group_head->pmu_name = e->pmu_name;
> + list_for_each_entry(p, pmu_info_list, nd) {
> + if (!strcasecmp(p->name, e->pmu_name)) {
> + pmu_group_head->num_counters = p->num_counters;
> + pmu_group_head->num_fixed_counters = p->num_fixed_counters;
> + break;
> + }
> + }
> + list_add_tail(&pmu_group_head->nd, groups);
> + }
> +
> + ret = insert_event_to_group(e, pmu_group_head);
> + return ret;
> +}
> +
> +/**
> + * create_grouping - Create a list of groups and place all the events of
> + * event_info_list into these groups.
> + * @pmu_info_list: the list of PMU units info based on pmu-events data, used for
> + * creating new groups.
> + * @event_info_list: the list of events to be grouped.
> + * @groupings: the list of groups with events placed in.
> + * @modifier: any modifiers added to the events.
> + */
> +static int create_grouping(struct list_head *pmu_info_list,
> + struct list_head *event_info_list,
> + struct list_head *groupings __maybe_unused,
> + const char *modifier __maybe_unused)
> +{
> + int ret = 0;
> + struct metricgroup__event_info *e;
> + LIST_HEAD(groups);
> + char *bit_buf = malloc(NR_COUNTERS);
> +
> + //TODO: for each new core group, we should consider to add events that uses fixed counters
> + list_for_each_entry(e, event_info_list, nd) {
> + bitmap_scnprintf(e->counters, NR_COUNTERS, bit_buf, NR_COUNTERS);
> + pr_debug("Event name %s, [pmu]=%s, [counters]=%s\n", e->name,
> + e->pmu_name, bit_buf);
> + ret = assign_event_grouping(e, pmu_info_list, &groups);
> + if (ret)
> + goto out;
> + }
> +out:
> + metricgroup__free_group_list(&groups);
> + return ret;
> +};
> +
> /**
> * hw_aware_build_grouping - Build event groupings by reading counter
> * requirement of the events and counter available on the system from
> @@ -1750,6 +2042,10 @@ static int hw_aware_build_grouping(struct expr_parse_ctx *ctx __maybe_unused,
> goto err_out;
> }
> ret = get_pmu_counter_layouts(&pmu_info_list, ltable);
> + if (ret)
> + goto err_out;
> + ret = create_grouping(&pmu_info_list, &event_info_list, groupings,
> + modifier);
>
> err_out:
> metricgroup__free_event_info(&event_info_list);
> --
> 2.42.0
>
Powered by blists - more mailing lists