[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <dd5070a7-254d-4763-439f-a5bfa8240fe1@oracle.com>
Date: Mon, 23 Jan 2023 15:35:52 +0000
From: John Garry <john.g.garry@...cle.com>
To: Ian Rogers <irogers@...gle.com>, Will Deacon <will@...nel.org>,
James Clark <james.clark@....com>,
Mike Leach <mike.leach@...aro.org>,
Leo Yan <leo.yan@...aro.org>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...nel.org>,
Namhyung Kim <namhyung@...nel.org>,
Adrian Hunter <adrian.hunter@...el.com>,
Kan Liang <kan.liang@...ux.intel.com>,
Kim Phillips <kim.phillips@....com>,
Florian Fischer <florian.fischer@...q.space>,
Ravi Bangoria <ravi.bangoria@....com>,
Xing Zhengjun <zhengjun.xing@...ux.intel.com>,
Rob Herring <robh@...nel.org>,
Kang Minchul <tegongkang@...il.com>,
linux-arm-kernel@...ts.infradead.org,
linux-perf-users@...r.kernel.org, linux-kernel@...r.kernel.org,
Sandipan Das <sandipan.das@....com>,
Jing Zhang <renyu.zj@...ux.alibaba.com>,
linuxppc-dev@...ts.ozlabs.org, Kajol Jain <kjain@...ux.ibm.com>
Cc: Stephane Eranian <eranian@...gle.com>,
Perry Taylor <perry.taylor@...el.com>,
Caleb Biggers <caleb.biggers@...el.com>
Subject: Re: [PATCH v2 7/9] perf pmu-events: Introduce pmu_metrics_table
On 21/12/2022 22:34, Ian Rogers wrote:
> Add a metrics table that is just a cast from pmu_events_table. This
> changes the APIs so that event and metric usage of the underlying
> table is different. Later changes will separate the tables.
>
> This introduction fixes a NO_JEVENTS=1 regression on:
> 68: Parse and process metrics : Ok
> 70: Event expansion for cgroups : Ok
> caused by the necessary test metrics not being found.
>
I have only reviewed some of this code so far...
> Signed-off-by: Ian Rogers <irogers@...gle.com>
> ---
> tools/perf/arch/arm64/util/pmu.c | 23 ++++++++++-
> tools/perf/pmu-events/empty-pmu-events.c | 52 ++++++++++++++++++++----
> tools/perf/pmu-events/jevents.py | 24 ++++++++---
> tools/perf/pmu-events/pmu-events.h | 10 +++--
> tools/perf/tests/expand-cgroup.c | 4 +-
> tools/perf/tests/parse-metric.c | 4 +-
> tools/perf/tests/pmu-events.c | 5 ++-
> tools/perf/util/metricgroup.c | 50 +++++++++++------------
> tools/perf/util/metricgroup.h | 2 +-
> tools/perf/util/pmu.c | 9 +++-
> tools/perf/util/pmu.h | 1 +
> 11 files changed, 133 insertions(+), 51 deletions(-)
>
> diff --git a/tools/perf/arch/arm64/util/pmu.c b/tools/perf/arch/arm64/util/pmu.c
> index 477e513972a4..f8ae479a06db 100644
> --- a/tools/perf/arch/arm64/util/pmu.c
> +++ b/tools/perf/arch/arm64/util/pmu.c
> @@ -19,7 +19,28 @@ const struct pmu_events_table *pmu_events_table__find(void)
> if (pmu->cpus->nr != cpu__max_cpu().cpu)
> return NULL;
>
> - return perf_pmu__find_table(pmu);
> + return perf_pmu__find_events_table(pmu);
> + }
> +
> + return NULL;
> +}
> +
> +const struct pmu_metrics_table *pmu_metrics_table__find(void)
> +{
> + struct perf_pmu *pmu = NULL;
> +
> + while ((pmu = perf_pmu__scan(pmu))) {
> + if (!is_pmu_core(pmu->name))
> + continue;
> +
> + /*
> + * The cpumap should cover all CPUs. Otherwise, some CPUs may
> + * not support some events or have different event IDs.
> + */
> + if (pmu->cpus->nr != cpu__max_cpu().cpu)
> + return NULL;
> +
> + return perf_pmu__find_metrics_table(pmu);
I think that this code will conflict with the recent arm64 metric
support. And now there seems to be even more scope for factoring out code.
> }
>
> return NULL;
> diff --git a/tools/perf/pmu-events/empty-pmu-events.c b/tools/perf/pmu-events/empty-pmu-events.c
> index 5572a4d1eddb..d50f60a571dd 100644
> --- a/tools/perf/pmu-events/empty-pmu-events.c
> +++ b/tools/perf/pmu-events/empty-pmu-events.c
> @@ -278,14 +278,12 @@ int pmu_events_table_for_each_event(const struct pmu_events_table *table, pmu_ev
> return 0;
> }
>
> -int pmu_events_table_for_each_metric(const struct pmu_events_table *etable, pmu_metric_iter_fn fn,
> - void *data)
> +int pmu_metrics_table_for_each_metric(const struct pmu_metrics_table *table, pmu_metric_iter_fn fn,
> + void *data)
> {
> - struct pmu_metrics_table *table = (struct pmu_metrics_table *)etable;
> -
> for (const struct pmu_metric *pm = &table->entries[0]
nit on coding style: do we normally declare local variables like this?
It condenses the code but makes it a bit less readable, IMHO.
> ; pm->metric_group || pm->metric_name;
> pm++) {
> - int ret = fn(pm, etable, data);
> + int ret = fn(pm, table, data);
>
> if (ret)
> return ret;
> @@ -293,7 +291,7 @@ int pmu_events_table_for_each_metric(const struct pmu_events_table *etable, pmu_
> return 0;
> }
>
> -const struct pmu_events_table *perf_pmu__find_table(struct perf_pmu *pmu)
> +const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
> {
> const struct pmu_events_table *table = NULL;
> char *cpuid = perf_pmu__getcpuid(pmu);
> @@ -321,6 +319,34 @@ const struct pmu_events_table *perf_pmu__find_table(struct perf_pmu *pmu)
> return table;
> }
>
> +const struct pmu_metrics_table *perf_pmu__find_metrics_table(struct perf_pmu *pmu)
> +{
> + const struct pmu_metrics_table *table = NULL;
> + char *cpuid = perf_pmu__getcpuid(pmu);
> + int i;
> +
> + /* on some platforms which uses cpus map, cpuid can be NULL for
> + * PMUs other than CORE PMUs.
> + */
> + if (!cpuid)
> + return NULL;
> +
> + i = 0;
> + for (;;) {
> + const struct pmu_events_map *map = &pmu_events_map[i++];
To me, this is all strange code. Again, this is a comment on the current
code: consider pmu_for_each_sys_event() as an example — we have a while
loop over each member of pmu_sys_event_tables[]. But pmu_sys_event_tables
is hardcoded to a single member, so why loop? It seems the same for all
these "for each" helpers in the "empty" events C file.
> +
> + if (!map->cpuid)
> + break;
> +
> + if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
> + table = &map->metric_table;
> + break;
> + }
> + }
> + free(cpuid);
> + return table;
> +}
> +
> const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
> {
> for (const struct pmu_events_map *tables = &pmu_events_map[0];
> @@ -332,6 +358,17 @@ const struct pmu_events_table *find_core_events_table(const char *arch, const ch
> return NULL;
> }
>
> +const struct pmu_metrics_table *find_core_metrics_table(const char *arch, const char *cpuid)
> +{
> + for (const struct pmu_events_map *tables = &pmu_events_map[0];
> + tables->arch;
> + tables++) {
combine with previous line?
> + if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
> + return &tables->metric_table;
> + }
> + return NULL;
> +}
> +
> int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
> {
> for (const struct pmu_events_map *tables = &pmu_events_map[0];
> @@ -350,8 +387,7 @@ int pmu_for_each_core_metric(pmu_metric_iter_fn fn, void *data)
> for (const struct pmu_events_map *tables = &pmu_events_map[0];
> tables->arch;
> tables++) {
> - int ret = pmu_events_table_for_each_metric(
> - (const struct pmu_events_table *)&tables->metric_table, fn, data);
> + int ret = pmu_metrics_table_for_each_metric(&tables->metric_table, fn, data);
>
> if (ret)
> return ret;
> diff --git a/tools/perf/pmu-events/jevents.py b/tools/perf/pmu-events/jevents.py
> index 7b9714b25d0a..be2cf8a8779c 100755
> --- a/tools/perf/pmu-events/jevents.py
> +++ b/tools/perf/pmu-events/jevents.py
> @@ -609,17 +609,19 @@ int pmu_events_table_for_each_event(const struct pmu_events_table *table,
> return 0;
> }
>
> -int pmu_events_table_for_each_metric(const struct pmu_events_table *table,
> +int pmu_metrics_table_for_each_metric(const struct pmu_metrics_table *mtable,
> pmu_metric_iter_fn fn,
> void *data)
> {
> + struct pmu_events_table *table = (struct pmu_events_table *)mtable;
As I may have hinted before, can we avoid casts like this, even if
transient?
> +
> for (size_t i = 0; i < table->length; i++) {
> struct pmu_metric pm;
> int ret;
>
> decompress_metric(table->entries[i].offset, &pm);
> if (pm.metric_name) {
> - ret = fn(&pm, table, data);
> + ret = fn(&pm, mtable, data);
> if (ret)
> return ret;
> }
Powered by blists - more mailing lists