[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <6732ac7aad986d682c6a36db5d435b113c7527d9.camel@intel.com>
Date: Thu, 1 May 2025 20:46:27 +0000
From: "Falcon, Thomas" <thomas.falcon@...el.com>
To: "irogers@...gle.com" <irogers@...gle.com>
CC: "alexander.shishkin@...ux.intel.com" <alexander.shishkin@...ux.intel.com>,
"linux-perf-users@...r.kernel.org" <linux-perf-users@...r.kernel.org>,
"peterz@...radead.org" <peterz@...radead.org>, "acme@...nel.org"
<acme@...nel.org>, "mingo@...hat.com" <mingo@...hat.com>, "Hunter, Adrian"
<adrian.hunter@...el.com>, "namhyung@...nel.org" <namhyung@...nel.org>,
"jolsa@...nel.org" <jolsa@...nel.org>, "linux-kernel@...r.kernel.org"
<linux-kernel@...r.kernel.org>, "kan.liang@...ux.intel.com"
<kan.liang@...ux.intel.com>, "mark.rutland@....com" <mark.rutland@....com>
Subject: Re: [PATCH] perf top: populate PMU capabilities data in perf_env
On Thu, 2025-05-01 at 13:00 -0700, Ian Rogers wrote:
> On Thu, May 1, 2025 at 11:42 AM Thomas Falcon <thomas.falcon@...el.com> wrote:
> >
> > Calling perf top with branch filters enabled on Intel hybrid CPUs
> > with branch counter event logging support results in a segfault.
> >
> > $ ./perf top -e '{cpu_core/cpu-cycles/,cpu_core/event=0xc6,umask=0x3,\
> > frontend=0x11,name=frontend_retired_dsb_miss/}' -j any,counter
> > perf: Segmentation fault
> > -------- backtrace --------
> > ./perf() [0x55f460]
> > /lib64/libc.so.6(+0x1a050) [0x7fd8be227050]
> > ./perf() [0x57b4a7]
> > ./perf() [0x561e5a]
> > ./perf() [0x604a81]
> > ./perf() [0x4395b5]
> > ./perf() [0x601732]
> > ./perf() [0x439bc1]
> > ./perf() [0x5d35b3]
> > ./perf() [0x43936c]
> > /lib64/libc.so.6(+0x70ba8) [0x7fd8be27dba8]
> > /lib64/libc.so.6(+0xf4b8c) [0x7fd8be301b8c]
>
Hi Ian, thanks for reviewing.
> Thanks Thomas. Could you generate this backtrace in GDB? I did write a
> patch to symbolize backtraces like this:
> https://lore.kernel.org/lkml/20250313052952.871958-2-irogers@google.com/
> Sadly without any reviewed tags and unmerged - the code calls routines
> that malloc so it isn't strictly sound if say the backtrace was needed
> from a SEGV in the malloc implementation, it is nicely
> self-referencing the perf APIs, ..
Sorry about that, here is the backtrace I'm seeing when running the perf top command in gdb:
Thread 27 "perf" received signal SIGSEGV, Segmentation fault.
[Switching to Thread 0x7fffcb7fe6c0 (LWP 812169)]
0x000000000057b4a7 in perf_env.find_br_cntr_info ()
(gdb) backtrace
#0 0x000000000057b4a7 in perf_env.find_br_cntr_info ()
#1 0x0000000000561e5a in addr_map_symbol.account_cycles ()
#2 0x0000000000604a81 in hist.account_cycles ()
#3 0x00000000004395b5 in hist_iter.top_callback ()
#4 0x0000000000601732 in hist_entry_iter.add ()
#5 0x0000000000439bc1 in deliver_event ()
#6 0x00000000005d35b3 in __ordered_events__flush ()
#7 0x000000000043936c in process_thread ()
#8 0x00007ffff6e7dba8 in start_thread (arg=<optimized out>) at pthread_create.c:448
#9 0x00007ffff6f01b8c in __GI___clone3 () at ../sysdeps/unix/sysv/linux/x86_64/clone3.S:78
Thanks,
Tom
> ```
> status = perf_env__read_cpuid(&perf_env);
> ```
>
> I kind of wish we didn't have the global one as what's the deal with
> ownership with it.
>
> > Fix this by populating cpu_pmu_caps and pmu_caps structures with
> > values from sysfs when calling perf top with branch stack sampling
> > enabled.
>
> I wonder if we could encounter similar problems from say a perf script
> handling live data and so some kind of lazy initialization should be
> employed. It is hard to say without seeing the backtrace.
>
> Thanks,
> Ian
>
> > Signed-off-by: Thomas Falcon <thomas.falcon@...el.com>
> > ---
> > tools/perf/builtin-top.c | 8 +++
> > tools/perf/util/env.c | 114 +++++++++++++++++++++++++++++++++++++++
> > tools/perf/util/env.h | 1 +
> > 3 files changed, 123 insertions(+)
> >
> > diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
> > index 1061f4eebc3f..c2688e4ef3c4 100644
> > --- a/tools/perf/builtin-top.c
> > +++ b/tools/perf/builtin-top.c
> > @@ -1729,6 +1729,14 @@ int cmd_top(int argc, const char **argv)
> > if (opts->branch_stack && callchain_param.enabled)
> > symbol_conf.show_branchflag_count = true;
> >
> > + if (opts->branch_stack) {
> > + status = perf_env__read_core_pmu_caps(&perf_env);
> > + if (status) {
> > + pr_err("PMU capability data is not available\n");
> > + goto out_delete_evlist;
> > + }
> > + }
> > +
> > sort__mode = SORT_MODE__TOP;
> > /* display thread wants entries to be collapsed in a different tree */
> > perf_hpp_list.need_collapse = 1;
> > diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
> > index 36411749e007..37ed6dc52cf3 100644
> > --- a/tools/perf/util/env.c
> > +++ b/tools/perf/util/env.c
> > @@ -416,6 +416,120 @@ static int perf_env__read_nr_cpus_avail(struct perf_env *env)
> > return env->nr_cpus_avail ? 0 : -ENOENT;
> > }
> >
> > +static int __perf_env__read_core_pmu_caps(struct perf_pmu *pmu,
> > + int *nr_caps, char ***caps,
> > + unsigned int *max_branches,
> > + unsigned int *br_cntr_nr,
> > + unsigned int *br_cntr_width)
> > +{
> > + struct perf_pmu_caps *pcaps = NULL;
> > + char *ptr, **tmp;
> > + int ret = 0;
> > +
> > + *nr_caps = 0;
> > + *caps = NULL;
> > +
> > + if (!pmu->nr_caps)
> > + return 0;
> > +
> > + *caps = zalloc(sizeof(char *) * pmu->nr_caps);
> > + if (!*caps)
> > + return -ENOMEM;
> > +
> > + tmp = *caps;
> > + list_for_each_entry(pcaps, &pmu->caps, list) {
> > +
> > + if (asprintf(&ptr, "%s=%s", pcaps->name, pcaps->value) < 0) {
> > + ret = -ENOMEM;
> > + goto error;
> > + }
> > +
> > + *tmp++ = ptr;
> > +
> > + if (!strcmp(pcaps->name, "branches"))
> > + *max_branches = atoi(pcaps->value);
> > +
> > + if (!strcmp(pcaps->name, "branch_counter_nr"))
> > + *br_cntr_nr = atoi(pcaps->value);
> > +
> > + if (!strcmp(pcaps->name, "branch_counter_width"))
> > + *br_cntr_width = atoi(pcaps->value);
> > + }
> > + *nr_caps = pmu->nr_caps;
> > + return 0;
> > +error:
> > + while (tmp-- != *caps)
> > + free(*tmp);
> > + free(*caps);
> > + *caps = NULL;
> > + *nr_caps = 0;
> > + return ret;
> > +}
> > +
> > +int perf_env__read_core_pmu_caps(struct perf_env *env)
> > +{
> > + struct perf_pmu *pmu = NULL;
> > + struct pmu_caps *pmu_caps;
> > + int nr_pmu = 0, i = 0, j;
> > + int ret;
> > +
> > + nr_pmu = perf_pmus__num_core_pmus();
> > +
> > + if (!nr_pmu)
> > + return -ENODEV;
> > +
> > + if (nr_pmu == 1) {
> > + pmu = perf_pmus__scan_core(NULL);
> > + if (!pmu)
> > + return -ENODEV;
> > + ret = perf_pmu__caps_parse(pmu);
> > + if (ret < 0)
> > + return ret;
> > + return __perf_env__read_core_pmu_caps(pmu, &env->nr_cpu_pmu_caps,
> > + &env->cpu_pmu_caps,
> > + &env->max_branches,
> > + &env->br_cntr_nr,
> > + &env->br_cntr_width);
> > + }
> > +
> > + pmu_caps = zalloc(sizeof(*pmu_caps) * nr_pmu);
> > + if (!pmu_caps)
> > + return -ENOMEM;
> > +
> > + while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
> > + if (perf_pmu__caps_parse(pmu) <= 0)
> > + continue;
> > + ret = __perf_env__read_core_pmu_caps(pmu, &pmu_caps[i].nr_caps,
> > + &pmu_caps[i].caps,
> > + &pmu_caps[i].max_branches,
> > + &pmu_caps[i].br_cntr_nr,
> > + &pmu_caps[i].br_cntr_width);
> > + if (ret)
> > + goto error;
> > +
> > + pmu_caps[i].pmu_name = strdup(pmu->name);
> > + if (!pmu_caps[i].pmu_name) {
> > + ret = -ENOMEM;
> > + goto error;
> > + }
> > + i++;
> > + }
> > +
> > + env->nr_pmus_with_caps = nr_pmu;
> > + env->pmu_caps = pmu_caps;
> > +
> > + return 0;
> > +error:
> > + for (i = 0; i < nr_pmu; i++) {
> > + for (j = 0; j < pmu_caps[i].nr_caps; j++)
> > + free(pmu_caps[i].caps[j]);
> > + free(pmu_caps[i].caps);
> > + free(pmu_caps[i].pmu_name);
> > + }
> > + free(pmu_caps);
> > + return ret;
> > +}
> > +
> > const char *perf_env__raw_arch(struct perf_env *env)
> > {
> > return env && !perf_env__read_arch(env) ? env->arch : "unknown";
> > diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
> > index d90e343cf1fa..135a1f714905 100644
> > --- a/tools/perf/util/env.h
> > +++ b/tools/perf/util/env.h
> > @@ -152,6 +152,7 @@ struct btf_node;
> >
> > extern struct perf_env perf_env;
> >
> > +int perf_env__read_core_pmu_caps(struct perf_env *env);
> > void perf_env__exit(struct perf_env *env);
> >
> > int perf_env__kernel_is_64_bit(struct perf_env *env);
> > --
> > 2.49.0
> >
Powered by blists - more mailing lists