Message-ID: <CAP-5=fUhaOgMZuhhsmjkTL6QqEQzcjq9GjJyteLG5RTWuBQ0gg@mail.gmail.com>
Date: Wed, 6 Dec 2023 13:04:14 -0800
From: Ian Rogers <irogers@...gle.com>
To: kan.liang@...ux.intel.com
Cc: acme@...nel.org, peterz@...radead.org, mingo@...hat.com,
namhyung@...nel.org, jolsa@...nel.org, adrian.hunter@...el.com,
john.g.garry@...cle.com, will@...nel.org, james.clark@....com,
mike.leach@...aro.org, leo.yan@...aro.org,
yuhaixin.yhx@...ux.alibaba.com, renyu.zj@...ux.alibaba.com,
tmricht@...ux.ibm.com, ravi.bangoria@....com,
linux-kernel@...r.kernel.org, linux-perf-users@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org
Subject: Re: [PATCH 2/6] perf mem: Clean up perf_mem_events__ptr()
On Wed, Dec 6, 2023 at 12:13 PM <kan.liang@...ux.intel.com> wrote:
>
> From: Kan Liang <kan.liang@...ux.intel.com>
>
> The mem_events can now be retrieved from struct perf_pmu, so an
> arch-specific perf_mem_events__ptr() is no longer required. Remove all
> of the arch-specific implementations.
>
> Intel hybrid has multiple PMUs that support mem_events, but they all
> share the same mem_events array. Other architectures have only one such
> PMU, so it is sufficient to configure the mem_events of a single PMU.
> Add perf_mem_events_find_pmu(), which returns the first PMU that
> supports mem_events.
>
> In perf_mem_events__init(), perf_pmus__scan() is no longer required,
> which avoids checking sysfs for every PMU on the system.
>
> Make perf_mem_events__record_args() more generic and remove
> perf_mem_events__print_unsupport_hybrid().
>
> Signed-off-by: Kan Liang <kan.liang@...ux.intel.com>
Reviewed-by: Ian Rogers <irogers@...gle.com>
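
Fwiw, for anyone following along, the caller-side flow after this patch
becomes roughly the following (a sketch pieced together from the diff
below, not code copied verbatim from the patch):

	struct perf_pmu *pmu = perf_mem_events_find_pmu();
	struct perf_mem_event *e;

	if (!pmu) {
		pr_err("failed: no PMU supports the memory events\n");
		return -1;
	}

	if (perf_mem_events__init(pmu)) {
		pr_err("failed: memory events not supported\n");
		return -1;
	}

	/* Per-event lookups are now relative to the chosen PMU. */
	e = perf_mem_events__ptr(pmu, PERF_MEM_EVENTS__LOAD);
	if (e && e->supported)
		e->record = true;
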
> ---
> tools/perf/arch/arm64/util/mem-events.c | 10 +--
> tools/perf/arch/x86/util/mem-events.c | 18 ++---
> tools/perf/builtin-c2c.c | 28 +++++--
> tools/perf/builtin-mem.c | 28 +++++--
> tools/perf/util/mem-events.c | 103 ++++++++++++------------
> tools/perf/util/mem-events.h | 9 ++-
> 6 files changed, 104 insertions(+), 92 deletions(-)
>
> diff --git a/tools/perf/arch/arm64/util/mem-events.c b/tools/perf/arch/arm64/util/mem-events.c
> index aaa4804922b4..2602e8688727 100644
> --- a/tools/perf/arch/arm64/util/mem-events.c
> +++ b/tools/perf/arch/arm64/util/mem-events.c
> @@ -12,17 +12,9 @@ struct perf_mem_event perf_mem_events_arm[PERF_MEM_EVENTS__MAX] = {
>
> static char mem_ev_name[100];
>
> -struct perf_mem_event *perf_mem_events__ptr(int i)
> -{
> - if (i >= PERF_MEM_EVENTS__MAX)
> - return NULL;
> -
> - return &perf_mem_events_arm[i];
> -}
> -
> const char *perf_mem_events__name(int i, const char *pmu_name __maybe_unused)
> {
> - struct perf_mem_event *e = perf_mem_events__ptr(i);
> + struct perf_mem_event *e = &perf_mem_events_arm[i];
>
> if (i >= PERF_MEM_EVENTS__MAX)
> return NULL;
> diff --git a/tools/perf/arch/x86/util/mem-events.c b/tools/perf/arch/x86/util/mem-events.c
> index 2b81d229982c..5fb41d50118d 100644
> --- a/tools/perf/arch/x86/util/mem-events.c
> +++ b/tools/perf/arch/x86/util/mem-events.c
> @@ -28,17 +28,6 @@ struct perf_mem_event perf_mem_events_amd[PERF_MEM_EVENTS__MAX] = {
> E("mem-ldst", "ibs_op//", "ibs_op"),
> };
>
> -struct perf_mem_event *perf_mem_events__ptr(int i)
> -{
> - if (i >= PERF_MEM_EVENTS__MAX)
> - return NULL;
> -
> - if (x86__is_amd_cpu())
> - return &perf_mem_events_amd[i];
> -
> - return &perf_mem_events_intel[i];
> -}
> -
> bool is_mem_loads_aux_event(struct evsel *leader)
> {
> struct perf_pmu *pmu = perf_pmus__find("cpu");
> @@ -54,7 +43,12 @@ bool is_mem_loads_aux_event(struct evsel *leader)
>
> const char *perf_mem_events__name(int i, const char *pmu_name)
> {
> - struct perf_mem_event *e = perf_mem_events__ptr(i);
> + struct perf_mem_event *e;
> +
> + if (x86__is_amd_cpu())
> + e = &perf_mem_events_amd[i];
> + else
> + e = &perf_mem_events_intel[i];
>
> if (!e)
> return NULL;
> diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
> index a4cf9de7a7b5..76c760be1bcf 100644
> --- a/tools/perf/builtin-c2c.c
> +++ b/tools/perf/builtin-c2c.c
> @@ -3215,12 +3215,19 @@ static int parse_record_events(const struct option *opt,
> const char *str, int unset __maybe_unused)
> {
> bool *event_set = (bool *) opt->value;
> + struct perf_pmu *pmu;
> +
> + pmu = perf_mem_events_find_pmu();
> + if (!pmu) {
> + pr_err("failed: there is no PMU that supports perf c2c\n");
> + exit(-1);
> + }
>
> if (!strcmp(str, "list")) {
> - perf_mem_events__list();
> + perf_mem_events__list(pmu);
> exit(0);
> }
> - if (perf_mem_events__parse(str))
> + if (perf_mem_events__parse(pmu, str))
> exit(-1);
>
> *event_set = true;
> @@ -3245,6 +3252,7 @@ static int perf_c2c__record(int argc, const char **argv)
> bool all_user = false, all_kernel = false;
> bool event_set = false;
> struct perf_mem_event *e;
> + struct perf_pmu *pmu;
> struct option options[] = {
> OPT_CALLBACK('e', "event", &event_set, "event",
> "event selector. Use 'perf c2c record -e list' to list available events",
> @@ -3256,7 +3264,13 @@ static int perf_c2c__record(int argc, const char **argv)
> OPT_END()
> };
>
> - if (perf_mem_events__init()) {
> + pmu = perf_mem_events_find_pmu();
> + if (!pmu) {
> + pr_err("failed: no PMU supports the memory events\n");
> + return -1;
> + }
> +
> + if (perf_mem_events__init(pmu)) {
> pr_err("failed: memory events not supported\n");
> return -1;
> }
> @@ -3280,7 +3294,7 @@ static int perf_c2c__record(int argc, const char **argv)
> rec_argv[i++] = "record";
>
> if (!event_set) {
> - e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD_STORE);
> + e = perf_mem_events__ptr(pmu, PERF_MEM_EVENTS__LOAD_STORE);
> /*
> * The load and store operations are required, use the event
> * PERF_MEM_EVENTS__LOAD_STORE if it is supported.
> @@ -3289,15 +3303,15 @@ static int perf_c2c__record(int argc, const char **argv)
> e->record = true;
> rec_argv[i++] = "-W";
> } else {
> - e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD);
> + e = perf_mem_events__ptr(pmu, PERF_MEM_EVENTS__LOAD);
Fwiw, it seems strange in cases like this that the function isn't:
perf_pmu__mem_events_ptr(pmu, PERF_MEM_EVENTS__LOAD)
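
(Purely as an illustration of the naming I have in mind; these exact
prototypes are not part of this patch, they just follow the perf_pmu__
naming convention used elsewhere in tools/perf, and the same pattern
could apply to the other helpers that now take a pmu:)

	struct perf_mem_event *perf_pmu__mem_events_ptr(struct perf_pmu *pmu, int i);
	int perf_pmu__mem_events_init(struct perf_pmu *pmu);
	int perf_pmu__mem_events_parse(struct perf_pmu *pmu, const char *str);
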
Thanks,
Ian
> e->record = true;
>
> - e = perf_mem_events__ptr(PERF_MEM_EVENTS__STORE);
> + e = perf_mem_events__ptr(pmu, PERF_MEM_EVENTS__STORE);
> e->record = true;
> }
> }
>
> - e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD);
> + e = perf_mem_events__ptr(pmu, PERF_MEM_EVENTS__LOAD);
> if (e->record)
> rec_argv[i++] = "-W";
>
> diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
> index 51499c20da01..8218c4721101 100644
> --- a/tools/perf/builtin-mem.c
> +++ b/tools/perf/builtin-mem.c
> @@ -43,12 +43,19 @@ static int parse_record_events(const struct option *opt,
> const char *str, int unset __maybe_unused)
> {
> struct perf_mem *mem = *(struct perf_mem **)opt->value;
> + struct perf_pmu *pmu;
> +
> + pmu = perf_mem_events_find_pmu();
> + if (!pmu) {
> + pr_err("failed: there is no PMU that supports perf mem\n");
> + exit(-1);
> + }
>
> if (!strcmp(str, "list")) {
> - perf_mem_events__list();
> + perf_mem_events__list(pmu);
> exit(0);
> }
> - if (perf_mem_events__parse(str))
> + if (perf_mem_events__parse(pmu, str))
> exit(-1);
>
> mem->operation = 0;
> @@ -72,6 +79,7 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
> int ret;
> bool all_user = false, all_kernel = false;
> struct perf_mem_event *e;
> + struct perf_pmu *pmu;
> struct option options[] = {
> OPT_CALLBACK('e', "event", &mem, "event",
> "event selector. use 'perf mem record -e list' to list available events",
> @@ -84,7 +92,13 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
> OPT_END()
> };
>
> - if (perf_mem_events__init()) {
> + pmu = perf_mem_events_find_pmu();
> + if (!pmu) {
> + pr_err("failed: no PMU supports the memory events\n");
> + return -1;
> + }
> +
> + if (perf_mem_events__init(pmu)) {
> pr_err("failed: memory events not supported\n");
> return -1;
> }
> @@ -113,7 +127,7 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
>
> rec_argv[i++] = "record";
>
> - e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD_STORE);
> + e = perf_mem_events__ptr(pmu, PERF_MEM_EVENTS__LOAD_STORE);
>
> /*
> * The load and store operations are required, use the event
> @@ -126,17 +140,17 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
> rec_argv[i++] = "-W";
> } else {
> if (mem->operation & MEM_OPERATION_LOAD) {
> - e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD);
> + e = perf_mem_events__ptr(pmu, PERF_MEM_EVENTS__LOAD);
> e->record = true;
> }
>
> if (mem->operation & MEM_OPERATION_STORE) {
> - e = perf_mem_events__ptr(PERF_MEM_EVENTS__STORE);
> + e = perf_mem_events__ptr(pmu, PERF_MEM_EVENTS__STORE);
> e->record = true;
> }
> }
>
> - e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD);
> + e = perf_mem_events__ptr(pmu, PERF_MEM_EVENTS__LOAD);
> if (e->record)
> rec_argv[i++] = "-W";
>
> diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
> index 0a8f415f5efe..887ffdcce338 100644
> --- a/tools/perf/util/mem-events.c
> +++ b/tools/perf/util/mem-events.c
> @@ -29,17 +29,42 @@ struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
> static char mem_loads_name[100];
> static bool mem_loads_name__init;
>
> -struct perf_mem_event * __weak perf_mem_events__ptr(int i)
> +struct perf_mem_event *perf_mem_events__ptr(struct perf_pmu *pmu, int i)
> {
> - if (i >= PERF_MEM_EVENTS__MAX)
> + if (i >= PERF_MEM_EVENTS__MAX || !pmu)
> return NULL;
>
> - return &perf_mem_events[i];
> + return &pmu->mem_events[i];
> +}
> +
> +static struct perf_pmu *perf_pmus__scan_mem(struct perf_pmu *pmu)
> +{
> + while ((pmu = perf_pmus__scan(pmu)) != NULL) {
> + if (pmu->mem_events)
> + return pmu;
> + }
> + return NULL;
> +}
> +
> +struct perf_pmu *perf_mem_events_find_pmu(void)
> +{
> + /*
> +	 * perf mem doesn't support per-PMU configuration yet. The
> +	 * exact same configuration is applied to every PMU that
> +	 * supports mem_events.
> +	 * Return the first PMU that supports mem_events.
> +	 *
> +	 * Note: the only case with multiple mem_events-capable PMUs
> +	 * is Intel hybrid, and there the PMUs share the exact same
> +	 * mem_events array, so configuring only the first PMU is
> +	 * sufficient as well.
> + */
> + return perf_pmus__scan_mem(NULL);
> }
>
> const char * __weak perf_mem_events__name(int i, const char *pmu_name __maybe_unused)
> {
> - struct perf_mem_event *e = perf_mem_events__ptr(i);
> + struct perf_mem_event *e = &perf_mem_events[i];
>
> if (!e)
> return NULL;
> @@ -61,7 +86,7 @@ __weak bool is_mem_loads_aux_event(struct evsel *leader __maybe_unused)
> return false;
> }
>
> -int perf_mem_events__parse(const char *str)
> +int perf_mem_events__parse(struct perf_pmu *pmu, const char *str)
> {
> char *tok, *saveptr = NULL;
> bool found = false;
> @@ -79,7 +104,7 @@ int perf_mem_events__parse(const char *str)
>
> while (tok) {
> for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
> - struct perf_mem_event *e = perf_mem_events__ptr(j);
> + struct perf_mem_event *e = perf_mem_events__ptr(pmu, j);
>
> if (!e->tag)
> continue;
> @@ -112,7 +137,7 @@ static bool perf_mem_event__supported(const char *mnt, struct perf_pmu *pmu,
> return !stat(path, &st);
> }
>
> -int perf_mem_events__init(void)
> +int perf_mem_events__init(struct perf_pmu *pmu)
> {
> const char *mnt = sysfs__mount();
> bool found = false;
> @@ -122,8 +147,7 @@ int perf_mem_events__init(void)
> return -ENOENT;
>
> for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
> - struct perf_mem_event *e = perf_mem_events__ptr(j);
> - struct perf_pmu *pmu = NULL;
> + struct perf_mem_event *e = perf_mem_events__ptr(pmu, j);
>
> /*
> * If the event entry isn't valid, skip initialization
> @@ -132,29 +156,20 @@ int perf_mem_events__init(void)
> if (!e->tag)
> continue;
>
> - /*
> - * Scan all PMUs not just core ones, since perf mem/c2c on
> - * platforms like AMD uses IBS OP PMU which is independent
> - * of core PMU.
> - */
> - while ((pmu = perf_pmus__scan(pmu)) != NULL) {
> - e->supported |= perf_mem_event__supported(mnt, pmu, e);
> - if (e->supported) {
> - found = true;
> - break;
> - }
> - }
> + e->supported |= perf_mem_event__supported(mnt, pmu, e);
> + if (e->supported)
> + found = true;
> }
>
> return found ? 0 : -ENOENT;
> }
>
> -void perf_mem_events__list(void)
> +void perf_mem_events__list(struct perf_pmu *pmu)
> {
> int j;
>
> for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
> - struct perf_mem_event *e = perf_mem_events__ptr(j);
> + struct perf_mem_event *e = perf_mem_events__ptr(pmu, j);
>
> fprintf(stderr, "%-*s%-*s%s",
> e->tag ? 13 : 0,
> @@ -165,50 +180,32 @@ void perf_mem_events__list(void)
> }
> }
>
> -static void perf_mem_events__print_unsupport_hybrid(struct perf_mem_event *e,
> - int idx)
> -{
> - const char *mnt = sysfs__mount();
> - struct perf_pmu *pmu = NULL;
> -
> - while ((pmu = perf_pmus__scan(pmu)) != NULL) {
> - if (!perf_mem_event__supported(mnt, pmu, e)) {
> - pr_err("failed: event '%s' not supported\n",
> - perf_mem_events__name(idx, pmu->name));
> - }
> - }
> -}
> -
> int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
> char **rec_tmp, int *tmp_nr)
> {
> const char *mnt = sysfs__mount();
> + struct perf_pmu *pmu = NULL;
> int i = *argv_nr, k = 0;
> struct perf_mem_event *e;
>
> - for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
> - e = perf_mem_events__ptr(j);
> - if (!e->record)
> - continue;
>
> - if (perf_pmus__num_mem_pmus() == 1) {
> - if (!e->supported) {
> - pr_err("failed: event '%s' not supported\n",
> - perf_mem_events__name(j, NULL));
> - return -1;
> - }
> + while ((pmu = perf_pmus__scan_mem(pmu)) != NULL) {
> + for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
> + e = perf_mem_events__ptr(pmu, j);
>
> - rec_argv[i++] = "-e";
> - rec_argv[i++] = perf_mem_events__name(j, NULL);
> - } else {
> - struct perf_pmu *pmu = NULL;
> + if (!e->record)
> + continue;
>
> if (!e->supported) {
> - perf_mem_events__print_unsupport_hybrid(e, j);
> + pr_err("failed: event '%s' not supported\n",
> + perf_mem_events__name(j, pmu->name));
> return -1;
> }
>
> - while ((pmu = perf_pmus__scan(pmu)) != NULL) {
> + if (perf_pmus__num_mem_pmus() == 1) {
> + rec_argv[i++] = "-e";
> + rec_argv[i++] = perf_mem_events__name(j, NULL);
> + } else {
> const char *s = perf_mem_events__name(j, pmu->name);
>
> if (!perf_mem_event__supported(mnt, pmu, e))
> diff --git a/tools/perf/util/mem-events.h b/tools/perf/util/mem-events.h
> index 8c5694b2d0b0..59a4303aac96 100644
> --- a/tools/perf/util/mem-events.h
> +++ b/tools/perf/util/mem-events.h
> @@ -36,14 +36,15 @@ enum {
> extern unsigned int perf_mem_events__loads_ldlat;
> extern struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX];
>
> -int perf_mem_events__parse(const char *str);
> -int perf_mem_events__init(void);
> +int perf_mem_events__parse(struct perf_pmu *pmu, const char *str);
> +int perf_mem_events__init(struct perf_pmu *pmu);
>
> const char *perf_mem_events__name(int i, const char *pmu_name);
> -struct perf_mem_event *perf_mem_events__ptr(int i);
> +struct perf_mem_event *perf_mem_events__ptr(struct perf_pmu *pmu, int i);
> +struct perf_pmu *perf_mem_events_find_pmu(void);
> bool is_mem_loads_aux_event(struct evsel *leader);
>
> -void perf_mem_events__list(void);
> +void perf_mem_events__list(struct perf_pmu *pmu);
> int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
> char **rec_tmp, int *tmp_nr);
>
> --
> 2.35.1
>