Message-ID: <158350572146.28353.4698357304879018456.tip-bot2@tip-bot2>
Date: Fri, 06 Mar 2020 14:42:01 -0000
From: "tip-bot2 for Ian Rogers" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Ian Rogers <irogers@...gle.com>,
"Peter Zijlstra (Intel)" <peterz@...radead.org>,
Ingo Molnar <mingo@...nel.org>, x86 <x86@...nel.org>,
LKML <linux-kernel@...r.kernel.org>
Subject: [tip: perf/core] perf/core: Use min_heap in visit_groups_merge()

The following commit has been merged into the perf/core branch of tip:
Commit-ID: 6eef8a7116deae0706ba6d897c0d7dd887cd2be2
Gitweb: https://git.kernel.org/tip/6eef8a7116deae0706ba6d897c0d7dd887cd2be2
Author: Ian Rogers <irogers@...gle.com>
AuthorDate: Thu, 13 Feb 2020 23:51:30 -08:00
Committer: Ingo Molnar <mingo@...nel.org>
CommitterDate: Fri, 06 Mar 2020 11:56:59 +01:00

perf/core: Use min_heap in visit_groups_merge()

visit_groups_merge() will pick the next event based on when it was
inserted into the context (perf_event group_index). Events may be per CPU
or for any CPU, but in the future we'd also like to have per-cgroup events
to avoid searching all events for those to schedule for a cgroup.

Introduce a min-heap for the events that maintains the property that the
earliest inserted event is always at the 0th element. Initialize the heap
with the per-CPU and any-CPU events for the context.

Based-on-work-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Signed-off-by: Ian Rogers <irogers@...gle.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Signed-off-by: Ingo Molnar <mingo@...nel.org>
Link: https://lkml.kernel.org/r/20200214075133.181299-4-irogers@google.com
---
kernel/events/core.c | 67 ++++++++++++++++++++++++++++++++-----------
1 file changed, 51 insertions(+), 16 deletions(-)
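For reference, <linux/min_heap.h> (added earlier in this series) provides a
generic array-backed binary min-heap specialized through a small callback
table. Below is a rough userspace sketch of that pattern, for illustration
only; it mirrors the API shape the patch uses (min_heapify(),
min_heapify_all(), min_heap_pop()) but is a simplified stand-in, not the
kernel implementation.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Simplified stand-ins for the kernel's struct min_heap and
 * struct min_heap_callbacks. */
struct min_heap {
	void *data;	/* backing array */
	int nr;		/* number of live elements */
	int size;	/* capacity, in elements */
};

struct min_heap_callbacks {
	int elem_size;
	bool (*less)(const void *l, const void *r);
	void (*swp)(void *l, void *r);
};

/* Sift the element at index pos down until every parent again
 * compares less than its children. */
static void min_heapify(struct min_heap *heap, int pos,
			const struct min_heap_callbacks *func)
{
	char *data = heap->data;

	for (;;) {
		int left = 2 * pos + 1, right = left + 1, smallest = pos;

		if (left < heap->nr &&
		    func->less(data + left * func->elem_size,
			       data + smallest * func->elem_size))
			smallest = left;
		if (right < heap->nr &&
		    func->less(data + right * func->elem_size,
			       data + smallest * func->elem_size))
			smallest = right;
		if (smallest == pos)
			break;
		func->swp(data + pos * func->elem_size,
			  data + smallest * func->elem_size);
		pos = smallest;
	}
}

/* Establish the heap property over an arbitrarily ordered array. */
static void min_heapify_all(struct min_heap *heap,
			    const struct min_heap_callbacks *func)
{
	int i;

	for (i = heap->nr / 2 - 1; i >= 0; i--)
		min_heapify(heap, i, func);
}

/* Remove the root: move the last element into slot 0 and sift down. */
static void min_heap_pop(struct min_heap *heap,
			 const struct min_heap_callbacks *func)
{
	char *data = heap->data;

	if (heap->nr <= 0)
		return;
	heap->nr--;
	if (heap->nr)
		memcpy(data, data + heap->nr * func->elem_size,
		       func->elem_size);
	min_heapify(heap, 0, func);
}

The elem_size/less/swp indirection is what lets one heap implementation
serve element types it knows nothing about; the patch below instantiates
it over an on-stack array of two struct perf_event pointers.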
diff --git a/kernel/events/core.c b/kernel/events/core.c
index dceeeb1..ddfb06c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -49,6 +49,7 @@
 #include <linux/sched/mm.h>
 #include <linux/proc_ns.h>
 #include <linux/mount.h>
+#include <linux/min_heap.h>
 
 #include "internal.h"
 
@@ -3392,32 +3393,66 @@ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
 	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
 }
 
-static int visit_groups_merge(struct perf_event_groups *groups, int cpu,
-			      int (*func)(struct perf_event *, void *), void *data)
+static bool perf_less_group_idx(const void *l, const void *r)
 {
-	struct perf_event **evt, *evt1, *evt2;
+	const struct perf_event *le = l, *re = r;
+
+	return le->group_index < re->group_index;
+}
+
+static void swap_ptr(void *l, void *r)
+{
+	void **lp = l, **rp = r;
+
+	swap(*lp, *rp);
+}
+
+static const struct min_heap_callbacks perf_min_heap = {
+	.elem_size = sizeof(struct perf_event *),
+	.less = perf_less_group_idx,
+	.swp = swap_ptr,
+};
+
+static void __heap_add(struct min_heap *heap, struct perf_event *event)
+{
+	struct perf_event **itrs = heap->data;
+
+	if (event) {
+		itrs[heap->nr] = event;
+		heap->nr++;
+	}
+}
+
+static noinline int visit_groups_merge(struct perf_event_groups *groups,
+				int cpu,
+				int (*func)(struct perf_event *, void *),
+				void *data)
+{
+	/* Space for per CPU and/or any CPU event iterators. */
+	struct perf_event *itrs[2];
+	struct min_heap event_heap = {
+		.data = itrs,
+		.nr = 0,
+		.size = ARRAY_SIZE(itrs),
+	};
+	struct perf_event **evt = event_heap.data;
 	int ret;
 
-	evt1 = perf_event_groups_first(groups, -1);
-	evt2 = perf_event_groups_first(groups, cpu);
+	__heap_add(&event_heap, perf_event_groups_first(groups, -1));
+	__heap_add(&event_heap, perf_event_groups_first(groups, cpu));
 
-	while (evt1 || evt2) {
-		if (evt1 && evt2) {
-			if (evt1->group_index < evt2->group_index)
-				evt = &evt1;
-			else
-				evt = &evt2;
-		} else if (evt1) {
-			evt = &evt1;
-		} else {
-			evt = &evt2;
-		}
+	min_heapify_all(&event_heap, &perf_min_heap);
 
+	while (event_heap.nr) {
 		ret = func(*evt, data);
 		if (ret)
 			return ret;
 
 		*evt = perf_event_groups_next(*evt);
+		if (*evt)
+			min_heapify(&event_heap, 0, &perf_min_heap);
+		else
+			min_heap_pop(&event_heap, &perf_min_heap);
 	}
 
 	return 0;
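Continuing the userspace sketch from above the diff (appending this to it
yields a complete program), the rewritten function is a k-way merge: seed
the heap with the first event of each group list, visit the minimum at
slot 0, advance that iterator in place with a single sift-down, and pop it
once its list is exhausted. All names below are illustrative, not the
kernel's; note that in this sketch the heap slots hold pointers and the
callbacks are handed the addresses of those slots, hence the double
indirection in the comparator.

#include <stdio.h>

/* Toy "events": two lists already sorted by group_index, standing in
 * for the any-CPU and per-CPU groups. */
struct event {
	unsigned long group_index;
	struct event *next;	/* next event in insertion order */
};

static bool event_less_group_idx(const void *l, const void *r)
{
	const struct event *le = *(const struct event * const *)l;
	const struct event *re = *(const struct event * const *)r;

	return le->group_index < re->group_index;
}

static void swap_event_ptr(void *l, void *r)
{
	struct event **lp = l, **rp = r, *tmp = *lp;

	*lp = *rp;
	*rp = tmp;
}

static int visit_merge(struct event *a, struct event *b,
		       int (*func)(struct event *, void *), void *data)
{
	struct event *itrs[2];
	struct min_heap event_heap = { .data = itrs, .nr = 0, .size = 2 };
	const struct min_heap_callbacks cb = {
		.elem_size = sizeof(struct event *),
		.less = event_less_group_idx,
		.swp = swap_event_ptr,
	};
	struct event **evt = event_heap.data;
	int ret;

	if (a)
		itrs[event_heap.nr++] = a;
	if (b)
		itrs[event_heap.nr++] = b;

	min_heapify_all(&event_heap, &cb);

	while (event_heap.nr) {
		ret = func(*evt, data);		/* visit current minimum */
		if (ret)
			return ret;

		*evt = (*evt)->next;		/* advance that iterator */
		if (*evt)
			min_heapify(&event_heap, 0, &cb); /* re-sink root */
		else
			min_heap_pop(&event_heap, &cb);	  /* list done */
	}

	return 0;
}

static int print_event(struct event *e, void *data)
{
	(void)data;
	printf("group_index %lu\n", e->group_index);
	return 0;
}

int main(void)
{
	struct event b2 = { 5, NULL }, b1 = { 2, &b2 };
	struct event a2 = { 4, NULL }, a1 = { 1, &a2 };

	/* Visits group_index 1, 2, 4, 5: the two lists interleaved in
	 * insertion order. */
	return visit_merge(&a1, &b1, print_event, NULL);
}

Compared with the old two-iterator if/else chain, the heap costs O(log k)
per visited event but generalizes to any number of iterators, which is
what the planned per-cgroup iterators need.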