Message-ID: <CAP-5=fVHetc8DqdqxURJm_VtaH6apJKoyVOSpfQrE2ntkEa+4g@mail.gmail.com>
Date: Mon, 6 Oct 2025 09:18:22 -0700
From: Ian Rogers <irogers@...gle.com>
To: Thomas Falcon <thomas.falcon@...el.com>, Namhyung Kim <namhyung@...nel.org>,
Jiri Olsa <jolsa@...nel.org>, Song Liu <songliubraving@...com>,
Arnaldo Carvalho de Melo <acme@...nel.org>
Cc: bpf@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-perf-users@...r.kernel.org, Howard Chu <howardchu95@...il.com>,
Gabriele Monaco <gmonaco@...hat.com>, Athira Rajeev <atrajeev@...ux.vnet.ibm.com>,
James Clark <james.clark@...aro.org>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Adrian Hunter <adrian.hunter@...el.com>, Tengda Wu <wutengda@...weicloud.com>,
Peter Zijlstra <peterz@...radead.org>, Ingo Molnar <mingo@...hat.com>
Subject: Re: [PATCH v1 2/2] perf bpf_counter: Fix handling of cpumap fixing hybrid
On Wed, Oct 1, 2025 at 11:12 AM Ian Rogers <irogers@...gle.com> wrote:
>
> Don't open evsels on all CPUs, open them only on the CPUs they
> support. This avoids opening, say, an e-core event on a p-core and
> getting a failure. Achieve this by getting rid of the "all_cpu_map".
>
> In the install_pe functions, don't use the cpu_map_idx as a CPU
> number; translate the cpu_map_idx, which is a dense index into the
> cpu_map that skips holes at the beginning, to a proper CPU number.
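
(Aside, for reviewers: a minimal sketch of the index vs. CPU-number
distinction, assuming a hypothetical hybrid topology where cpu_atom
covers CPUs 16-23, so the evsel's cpu map is {16..23}:)

```c
/* cpu_map_idx 0 is the first entry of the evsel's cpu map, which on
 * this hypothetical topology is CPU 16, not CPU 0. */
struct perf_cpu cpu = perf_cpu_map__cpu(evsel->core.cpus, /*idx=*/0);
/* cpu.cpu == 16 here; the old code used the raw index (0) as the key. */
```
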
>
> Before:
> ```
> $ perf stat --bpf-counters -a -e cycles,instructions -- sleep 1
>
> Performance counter stats for 'system wide':
>
> <not supported> cpu_atom/cycles/
> 566,270,672 cpu_core/cycles/
> <not supported> cpu_atom/instructions/
> 572,792,836 cpu_core/instructions/ # 1.01 insn per cycle
>
> 1.001595384 seconds time elapsed
> ```
>
> After:
> ```
> $ perf stat --bpf-counters -a -e cycles,instructions -- sleep 1
>
> Performance counter stats for 'system wide':
>
> 443,299,201 cpu_atom/cycles/
> 1,233,919,737 cpu_core/cycles/
> 213,634,112 cpu_atom/instructions/ # 0.48 insn per cycle
> 2,758,965,527 cpu_core/instructions/ # 2.24 insn per cycle
>
> 1.001699485 seconds time elapsed
> ```
>
> Fixes: 7fac83aaf2ee ("perf stat: Introduce 'bperf' to share hardware PMCs with BPF")
> Signed-off-by: Ian Rogers <irogers@...gle.com>
+Thomas Falcon

I think it'd be nice to get this quite major fix for
--bpf-counters/bperf for hybrid architectures into v6.18 and the
stable trees. Thomas, would it be possible for you to give a
Tested-by tag using the reproduction in the commit message?

Thanks,
Ian
> ---
> tools/perf/util/bpf_counter.c | 26 ++++++++++----------------
> tools/perf/util/bpf_counter_cgroup.c | 3 ++-
> 2 files changed, 12 insertions(+), 17 deletions(-)
>
> diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
> index 1c6cb5ea077e..ca5d01b9017d 100644
> --- a/tools/perf/util/bpf_counter.c
> +++ b/tools/perf/util/bpf_counter.c
> @@ -336,6 +336,7 @@ static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu_map_idx
> {
> struct bpf_prog_profiler_bpf *skel;
> struct bpf_counter *counter;
> + int cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx).cpu;
> int ret;
>
> list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
> @@ -343,7 +344,7 @@ static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu_map_idx
> assert(skel != NULL);
>
> ret = bpf_map_update_elem(bpf_map__fd(skel->maps.events),
> - &cpu_map_idx, &fd, BPF_ANY);
> + &cpu, &fd, BPF_ANY);
> if (ret)
> return ret;
> }
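
The reason the key must be the logical CPU number: the BPF side reads
the event out of the PERF_EVENT_ARRAY for the CPU it is currently
running on. A hedged sketch of that consumer (not the exact skeleton
source; the `events` map name matches the user-side code above):

```c
/* BPF program side: BPF_F_CURRENT_CPU makes the helper index the
 * events map by the current CPU, so the fd installed for CPU N must
 * live at key N, not at the dense cpu_map index. */
struct bpf_perf_event_value val;
long err = bpf_perf_event_read_value(&events, BPF_F_CURRENT_CPU,
				     &val, sizeof(val));
```
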
> @@ -451,7 +452,6 @@ static int bperf_check_target(struct evsel *evsel,
> return 0;
> }
>
> -static struct perf_cpu_map *all_cpu_map;
> static __u32 filter_entry_cnt;
>
> static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
> @@ -495,7 +495,7 @@ static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
> * following evsel__open_per_cpu call
> */
> evsel->leader_skel = skel;
> - evsel__open_per_cpu(evsel, all_cpu_map, -1);
> + evsel__open(evsel, evsel->core.cpus, evsel->core.threads);
>
> out:
> bperf_leader_bpf__destroy(skel);
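
For clarity, this hunk is the root cause of the "<not supported>"
counts in the Before output: the leader was opened on every online CPU
rather than on the evsel's own cpu map. An illustrative sketch
(hypothetical topology with e-cores at CPUs 16-23):

```c
/* Every online CPU, e.g. {0..23}: */
struct perf_cpu_map *online = perf_cpu_map__new_online_cpus();
/* A cpu_atom evsel's map holds only the e-cores, e.g. {16..23}: */
struct perf_cpu_map *atom_cpus = evsel->core.cpus;

/* Opening on "online" placed cpu_atom events on p-cores and failed;
 * opening on the evsel's own CPUs and threads avoids that: */
evsel__open(evsel, evsel->core.cpus, evsel->core.threads);
```
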
> @@ -533,12 +533,6 @@ static int bperf__load(struct evsel *evsel, struct target *target)
> if (bperf_check_target(evsel, target, &filter_type, &filter_entry_cnt))
> return -1;
>
> - if (!all_cpu_map) {
> - all_cpu_map = perf_cpu_map__new_online_cpus();
> - if (!all_cpu_map)
> - return -1;
> - }
> -
> evsel->bperf_leader_prog_fd = -1;
> evsel->bperf_leader_link_fd = -1;
>
> @@ -656,9 +650,10 @@ static int bperf__load(struct evsel *evsel, struct target *target)
> static int bperf__install_pe(struct evsel *evsel, int cpu_map_idx, int fd)
> {
> struct bperf_leader_bpf *skel = evsel->leader_skel;
> + int cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx).cpu;
>
> return bpf_map_update_elem(bpf_map__fd(skel->maps.events),
> - &cpu_map_idx, &fd, BPF_ANY);
> + &cpu, &fd, BPF_ANY);
> }
>
> /*
> @@ -667,13 +662,12 @@ static int bperf__install_pe(struct evsel *evsel, int cpu_map_idx, int fd)
> */
> static int bperf_sync_counters(struct evsel *evsel)
> {
> - int num_cpu, i, cpu;
> + struct perf_cpu cpu;
> + int idx;
> +
> + perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus)
> + bperf_trigger_reading(evsel->bperf_leader_prog_fd, cpu.cpu);
>
> - num_cpu = perf_cpu_map__nr(all_cpu_map);
> - for (i = 0; i < num_cpu; i++) {
> - cpu = perf_cpu_map__cpu(all_cpu_map, i).cpu;
> - bperf_trigger_reading(evsel->bperf_leader_prog_fd, cpu);
> - }
> return 0;
> }
>
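
On the sync path the new iterator only touches CPUs the evsel covers.
A standalone usage sketch of the macro (assuming the tools/lib/perf
API), showing that it yields both the dense index and the CPU number:

```c
struct perf_cpu cpu;
int idx;

/* idx is the dense map index, cpu.cpu the logical CPU number. */
perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus)
	pr_debug("idx %d -> cpu %d\n", idx, cpu.cpu);
```
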
> diff --git a/tools/perf/util/bpf_counter_cgroup.c b/tools/perf/util/bpf_counter_cgroup.c
> index ed6a29b106b4..690be3ce3e11 100644
> --- a/tools/perf/util/bpf_counter_cgroup.c
> +++ b/tools/perf/util/bpf_counter_cgroup.c
> @@ -186,7 +186,8 @@ static int bperf_cgrp__load(struct evsel *evsel,
> }
>
> static int bperf_cgrp__install_pe(struct evsel *evsel __maybe_unused,
> - int cpu __maybe_unused, int fd __maybe_unused)
> + int cpu_map_idx __maybe_unused,
> + int fd __maybe_unused)
> {
> /* nothing to do */
> return 0;
> --
> 2.51.0.618.g983fd99d29-goog
>