Message-Id: <20191116011845.177150-4-irogers@google.com>
Date:   Fri, 15 Nov 2019 17:18:38 -0800
From:   Ian Rogers <irogers@...gle.com>
To:     Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...hat.com>,
        Arnaldo Carvalho de Melo <acme@...nel.org>,
        Mark Rutland <mark.rutland@....com>,
        Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
        Jiri Olsa <jolsa@...hat.com>,
        Namhyung Kim <namhyung@...nel.org>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Masahiro Yamada <yamada.masahiro@...ionext.com>,
        Kees Cook <keescook@...omium.org>,
        Catalin Marinas <catalin.marinas@....com>,
        Petr Mladek <pmladek@...e.com>,
        Mauro Carvalho Chehab <mchehab+samsung@...nel.org>,
        Qian Cai <cai@....pw>, Joe Lawrence <joe.lawrence@...hat.com>,
        Tetsuo Handa <penguin-kernel@...ove.sakura.ne.jp>,
        "Uladzislau Rezki (Sony)" <urezki@...il.com>,
        Andy Shevchenko <andriy.shevchenko@...ux.intel.com>,
        Ard Biesheuvel <ardb@...nel.org>,
        "David S. Miller" <davem@...emloft.net>,
        Kent Overstreet <kent.overstreet@...il.com>,
        Gary Hook <Gary.Hook@....com>, Arnd Bergmann <arnd@...db.de>,
        Kan Liang <kan.liang@...ux.intel.com>,
        linux-kernel@...r.kernel.org
Cc:     Stephane Eranian <eranian@...gle.com>,
        Andi Kleen <ak@...ux.intel.com>,
        Ian Rogers <irogers@...gle.com>
Subject: [PATCH v4 03/10] perf: Use min_max_heap in visit_groups_merge

visit_groups_merge will pick the next event based on when it was
inserted into the context (perf_event group_index). Events may be
per-CPU or for any CPU, and in the future we'd also like per-cgroup
events, so that scheduling events for a cgroup doesn't require
searching all events. Introduce a min-heap for the events that
maintains the property that the earliest-inserted event is always at
the 0th element. Initialize the heap with the per-CPU and any-CPU
events for the context.
Based-on-work-by: Peter Zijlstra (Intel) <peterz@...radead.org>

Signed-off-by: Ian Rogers <irogers@...gle.com>
---
 kernel/events/core.c | 72 +++++++++++++++++++++++++++++++++-----------
 1 file changed, 54 insertions(+), 18 deletions(-)

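Not for merging: as an aside for reviewers, below is a minimal
userspace sketch of the merge strategy the commit message describes.
The names (struct event, sift_down, visit_merge) are hypothetical
illustrations, not the kernel min_max_heap API. Iterators are kept in
a small array ordered as a min-heap on group_index, so the next event
to visit is always element 0; advancing an iterator is a pop-push,
exhausting one is a pop.

#include <stddef.h>

struct event {
	unsigned long group_index;
	struct event *next;	/* next event from the same iterator */
};

/* Restore the min-heap property by sifting element 0 down. */
static void sift_down(struct event **heap, size_t size)
{
	size_t pos = 0;

	for (;;) {
		size_t l = 2 * pos + 1, r = l + 1, min = pos;
		struct event *tmp;

		if (l < size && heap[l]->group_index < heap[min]->group_index)
			min = l;
		if (r < size && heap[r]->group_index < heap[min]->group_index)
			min = r;
		if (min == pos)
			break;
		tmp = heap[pos];
		heap[pos] = heap[min];
		heap[min] = tmp;
		pos = min;
	}
}

/* Visit the merge of the lists rooted at a and b in group_index order. */
static void visit_merge(struct event *a, struct event *b,
			void (*func)(struct event *))
{
	struct event *heap[2];
	size_t size = 0;

	if (a)
		heap[size++] = a;
	if (b)
		heap[size++] = b;
	/* Heapify: with two elements, one comparison suffices. */
	if (size == 2 && heap[1]->group_index < heap[0]->group_index) {
		struct event *tmp = heap[0];

		heap[0] = heap[1];
		heap[1] = tmp;
	}

	while (size) {
		func(heap[0]);			/* smallest group_index */
		if (heap[0]->next)
			heap[0] = heap[0]->next;	/* pop + push */
		else
			heap[0] = heap[--size];	/* pop: last becomes root */
		sift_down(heap, size);
	}
}

With only two iterators this degenerates to the pairwise comparison
the patch removes, but the same loop extends to additional per-cgroup
iterators later in the series without further special-casing.
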
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0dce28b0aae0..b0e89a488e3d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -49,6 +49,7 @@
 #include <linux/sched/mm.h>
 #include <linux/proc_ns.h>
 #include <linux/mount.h>
+#include <linux/min_max_heap.h>
 
 #include "internal.h"
 
@@ -3372,32 +3373,67 @@ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
 	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
 }
 
-static int visit_groups_merge(struct perf_event_groups *groups, int cpu,
-			      int (*func)(struct perf_event *, void *), void *data)
+static bool perf_cmp_group_idx(const void *l, const void *r)
 {
-	struct perf_event **evt, *evt1, *evt2;
+	const struct perf_event *le = l, *re = r;
+
+	return le->group_index < re->group_index;
+}
+
+static void swap_ptr(void *l, void *r)
+{
+	void **lp = l, **rp = r;
+
+	swap(*lp, *rp);
+}
+
+static const struct min_max_heap_callbacks perf_min_heap = {
+	.elem_size = sizeof(struct perf_event *),
+	.cmp = perf_cmp_group_idx,
+	.swp = swap_ptr,
+};
+
+static void __heap_add(struct min_max_heap *heap, struct perf_event *event)
+{
+	struct perf_event **itrs = heap->data;
+
+	if (event) {
+		itrs[heap->size] = event;
+		heap->size++;
+	}
+}
+
+static noinline int visit_groups_merge(struct perf_event_groups *groups,
+				int cpu,
+				int (*func)(struct perf_event *, void *),
+				void *data)
+{
+	/* Space for per CPU and/or any CPU event iterators. */
+	struct perf_event *itrs[2];
+	struct min_max_heap event_heap = {
+		.data = itrs,
+		.size = 0,
+		.cap = ARRAY_SIZE(itrs),
+	};
+	struct perf_event *next;
 	int ret;
 
-	evt1 = perf_event_groups_first(groups, -1);
-	evt2 = perf_event_groups_first(groups, cpu);
+	__heap_add(&event_heap, perf_event_groups_first(groups, -1));
+	__heap_add(&event_heap, perf_event_groups_first(groups, cpu));
 
-	while (evt1 || evt2) {
-		if (evt1 && evt2) {
-			if (evt1->group_index < evt2->group_index)
-				evt = &evt1;
-			else
-				evt = &evt2;
-		} else if (evt1) {
-			evt = &evt1;
-		} else {
-			evt = &evt2;
-		}
+	min_max_heapify_all(&event_heap, &perf_min_heap);
 
-		ret = func(*evt, data);
+	while (event_heap.size) {
+		ret = func(itrs[0], data);
 		if (ret)
 			return ret;
 
-		*evt = perf_event_groups_next(*evt);
+		next = perf_event_groups_next(itrs[0]);
+		if (next)
+			min_max_heap_pop_push(&event_heap, &next,
+					      &perf_min_heap);
+		else
+			min_max_heap_pop(&event_heap, &perf_min_heap);
 	}
 
 	return 0;
-- 
2.24.0.432.g9d3f5f5b63-goog
