Message-ID: <20251106071241.141234-2-irogers@google.com>
Date: Wed, 5 Nov 2025 23:12:32 -0800
From: Ian Rogers <irogers@...gle.com>
To: Peter Zijlstra <peterz@...radead.org>, Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>, Namhyung Kim <namhyung@...nel.org>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>, Jiri Olsa <jolsa@...nel.org>,
Ian Rogers <irogers@...gle.com>, Adrian Hunter <adrian.hunter@...el.com>,
"Dr. David Alan Gilbert" <linux@...blig.org>, Yang Li <yang.lee@...ux.alibaba.com>,
James Clark <james.clark@...aro.org>, Thomas Falcon <thomas.falcon@...el.com>,
Thomas Richter <tmricht@...ux.ibm.com>, linux-perf-users@...r.kernel.org,
linux-kernel@...r.kernel.org, Andi Kleen <ak@...ux.intel.com>,
Dapeng Mi <dapeng1.mi@...ux.intel.com>
Subject: [PATCH v3 1/9] libperf cpumap: Reduce allocations and sorting in intersect
On hybrid platforms the CPU maps are often disjoint. Rather than copying
CPUs into a temporary array and then trimming it, first compute the number
of common CPUs; if there are none, exit early, otherwise allocate a map of
exactly that size and copy the CPUs in sorted order. This avoids any memory
allocation in the disjoint case, and avoids the second malloc and needless
sort that the previous trim step incurred.
Signed-off-by: Ian Rogers <irogers@...gle.com>
---
tools/lib/perf/cpumap.c | 29 +++++++++++++++++++----------
1 file changed, 19 insertions(+), 10 deletions(-)
diff --git a/tools/lib/perf/cpumap.c b/tools/lib/perf/cpumap.c
index b20a5280f2b3..7e88417ba84d 100644
--- a/tools/lib/perf/cpumap.c
+++ b/tools/lib/perf/cpumap.c
@@ -453,21 +453,33 @@ int perf_cpu_map__merge(struct perf_cpu_map **orig, struct perf_cpu_map *other)
struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
struct perf_cpu_map *other)
{
- struct perf_cpu *tmp_cpus;
- int tmp_len;
int i, j, k;
- struct perf_cpu_map *merged = NULL;
+ struct perf_cpu_map *merged;
if (perf_cpu_map__is_subset(other, orig))
return perf_cpu_map__get(orig);
if (perf_cpu_map__is_subset(orig, other))
return perf_cpu_map__get(other);
- tmp_len = max(__perf_cpu_map__nr(orig), __perf_cpu_map__nr(other));
- tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
- if (!tmp_cpus)
+ i = j = k = 0;
+ while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
+ if (__perf_cpu_map__cpu(orig, i).cpu < __perf_cpu_map__cpu(other, j).cpu)
+ i++;
+ else if (__perf_cpu_map__cpu(orig, i).cpu > __perf_cpu_map__cpu(other, j).cpu)
+ j++;
+ else { /* CPUs match. */
+ i++;
+ j++;
+ k++;
+ }
+ }
+ if (k == 0) /* Maps are completely disjoint. */
return NULL;
+ merged = perf_cpu_map__alloc(k);
+ if (!merged)
+ return NULL;
+ /* Entries are added to merged in sorted order, so no need to sort again. */
i = j = k = 0;
while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
if (__perf_cpu_map__cpu(orig, i).cpu < __perf_cpu_map__cpu(other, j).cpu)
@@ -476,11 +488,8 @@ struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
j++;
else {
j++;
- tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
+ RC_CHK_ACCESS(merged)->map[k++] = __perf_cpu_map__cpu(orig, i++);
}
}
- if (k)
- merged = cpu_map__trim_new(k, tmp_cpus);
- free(tmp_cpus);
return merged;
}
--
2.51.2.1041.gc1ab5b90ca-goog