Message-Id: <20191114003042.85252-8-irogers@google.com>
Date:   Wed, 13 Nov 2019 16:30:39 -0800
From:   Ian Rogers <irogers@...gle.com>
To:     Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...hat.com>,
        Arnaldo Carvalho de Melo <acme@...nel.org>,
        Mark Rutland <mark.rutland@....com>,
        Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
        Jiri Olsa <jolsa@...hat.com>,
        Namhyung Kim <namhyung@...nel.org>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Masahiro Yamada <yamada.masahiro@...ionext.com>,
        Kees Cook <keescook@...omium.org>,
        Catalin Marinas <catalin.marinas@....com>,
        Petr Mladek <pmladek@...e.com>,
        Mauro Carvalho Chehab <mchehab+samsung@...nel.org>,
        Qian Cai <cai@....pw>, Joe Lawrence <joe.lawrence@...hat.com>,
        Tetsuo Handa <penguin-kernel@...ove.sakura.ne.jp>,
        Sri Krishna chowdary <schowdary@...dia.com>,
        "Uladzislau Rezki (Sony)" <urezki@...il.com>,
        Andy Shevchenko <andriy.shevchenko@...ux.intel.com>,
        Changbin Du <changbin.du@...el.com>,
        Ard Biesheuvel <ardb@...nel.org>,
        "David S. Miller" <davem@...emloft.net>,
        Kent Overstreet <kent.overstreet@...il.com>,
        Gary Hook <Gary.Hook@....com>, Arnd Bergmann <arnd@...db.de>,
        Kan Liang <kan.liang@...ux.intel.com>,
        linux-kernel@...r.kernel.org
Cc:     Stephane Eranian <eranian@...gle.com>,
        Andi Kleen <ak@...ux.intel.com>,
        Ian Rogers <irogers@...gle.com>
Subject: [PATCH v3 07/10] perf: simplify and rename visit_groups_merge

To enable a future caching optimization, pass in whether
visit_groups_merge is operating on pinned or flexible groups. The
is_pinned argument makes the func callback redundant, so rename the
function to ctx_groups_sched_in, as all it now does is schedule pinned
or flexible groups in. Compute the cpu and groups arguments locally to
shorten the argument list. Remove sched_in_data, as it only repeats
arguments that are already passed in. Remove the unused data argument
from pinned_sched_in.

Signed-off-by: Ian Rogers <irogers@...gle.com>
---
 kernel/events/core.c | 106 +++++++++++++++++--------------------------
 1 file changed, 41 insertions(+), 65 deletions(-)
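
Note (not part of the patch): a stand-alone sketch of the refactoring
pattern, for reviewers who want to see it outside the diff. An iterator
that takes a callback plus a void * argument struct is replaced by one
that takes a bool and calls the two known callees directly, so the
wrapper struct and the indirect call go away. The names below are
illustrative only, not the kernel symbols.

/* Sketch of the callback-removal pattern; illustrative names only. */
#include <stdbool.h>
#include <stdio.h>

struct item { int state; };

/* "Before": the iterator takes a callback and a void * bag of arguments. */
struct visit_data { int can_add; };

static int flexible_cb(struct item *it, void *data)
{
	struct visit_data *vd = data;

	if (vd->can_add)
		printf("flexible (old style), state %d\n", it->state);
	return 0;
}

static int visit_old(struct item *it, int n,
		     int (*func)(struct item *, void *), void *data)
{
	for (int i = 0; i < n; i++) {
		int ret = func(&it[i], data);

		if (ret)
			return ret;
	}
	return 0;
}

/* "After": a bool selects the callee; per-call state lives on the stack. */
static int handle_pinned(struct item *it)
{
	printf("pinned, state %d\n", it->state);
	return 0;
}

static int handle_flexible(struct item *it, int *can_add)
{
	if (*can_add)
		printf("flexible, state %d\n", it->state);
	return 0;
}

static int visit_new(struct item *it, int n, bool is_pinned)
{
	int can_add = 1;

	for (int i = 0; i < n; i++) {
		int ret = is_pinned ? handle_pinned(&it[i])
				    : handle_flexible(&it[i], &can_add);

		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	struct item items[2] = { { .state = 1 }, { .state = 2 } };
	struct visit_data vd = { .can_add = 1 };

	visit_old(items, 2, flexible_cb, &vd);
	visit_new(items, 2, true);
	visit_new(items, 2, false);
	return 0;
}

The diff below applies the same shape: ctx_groups_sched_in dispatches to
pinned_sched_in or flexible_sched_in based on is_pinned, and the former
sched_in_data struct is gone.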

diff --git a/kernel/events/core.c b/kernel/events/core.c
index cb5fc47611c7..11594d8bbb2e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3509,10 +3509,18 @@ static void __heap_add(struct min_max_heap *heap, struct perf_event *event)
 	}
 }
 
-static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
-				struct perf_event_groups *groups, int cpu,
-				int (*func)(struct perf_event *, void *),
-				void *data)
+static int pinned_sched_in(struct perf_event_context *ctx,
+			struct perf_cpu_context *cpuctx,
+			struct perf_event *event);
+
+static int flexible_sched_in(struct perf_event_context *ctx,
+			struct perf_cpu_context *cpuctx,
+			struct perf_event *event,
+			int *can_add_hw);
+
+static int ctx_groups_sched_in(struct perf_event_context *ctx,
+			struct perf_cpu_context *cpuctx,
+			bool is_pinned)
 {
 #ifdef CONFIG_CGROUP_PERF
 	struct cgroup_subsys_state *css = NULL;
@@ -3522,9 +3530,13 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
 	struct min_max_heap event_heap;
 	struct perf_event **evt;
 	struct perf_event *next;
-	int ret;
+	int ret, can_add_hw = 1;
+	int cpu = smp_processor_id();
+	struct perf_event_groups *groups = is_pinned
+		? &ctx->pinned_groups
+		: &ctx->flexible_groups;
 
-	if (cpuctx) {
+	if (ctx == &cpuctx->ctx) {
 		event_heap = (struct min_max_heap){
 			.data = cpuctx->itr_storage,
 			.size = 0,
@@ -3562,7 +3574,11 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
 	heapify_all(&event_heap, &perf_min_heap);
 
 	while (event_heap.size) {
-		ret = func(*evt, data);
+		if (is_pinned)
+			ret = pinned_sched_in(ctx, cpuctx, *evt);
+		else
+			ret = flexible_sched_in(ctx, cpuctx, *evt, &can_add_hw);
+
 		if (ret)
 			return ret;
 
@@ -3576,25 +3592,19 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
 	return 0;
 }
 
-struct sched_in_data {
-	struct perf_event_context *ctx;
-	struct perf_cpu_context *cpuctx;
-	int can_add_hw;
-};
-
-static int pinned_sched_in(struct perf_event *event, void *data)
+static int pinned_sched_in(struct perf_event_context *ctx,
+			struct perf_cpu_context *cpuctx,
+			struct perf_event *event)
 {
-	struct sched_in_data *sid = data;
-
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
 
 	if (!event_filter_match(event))
 		return 0;
 
-	if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
-		if (!group_sched_in(event, sid->cpuctx, sid->ctx))
-			list_add_tail(&event->active_list, &sid->ctx->pinned_active);
+	if (group_can_go_on(event, cpuctx, 1)) {
+		if (!group_sched_in(event, cpuctx, ctx))
+			list_add_tail(&event->active_list, &ctx->pinned_active);
 	}
 
 	/*
@@ -3607,65 +3617,30 @@ static int pinned_sched_in(struct perf_event *event, void *data)
 	return 0;
 }
 
-static int flexible_sched_in(struct perf_event *event, void *data)
+static int flexible_sched_in(struct perf_event_context *ctx,
+			struct perf_cpu_context *cpuctx,
+			struct perf_event *event,
+			int *can_add_hw)
 {
-	struct sched_in_data *sid = data;
-
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
 
 	if (!event_filter_match(event))
 		return 0;
 
-	if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
-		int ret = group_sched_in(event, sid->cpuctx, sid->ctx);
+	if (group_can_go_on(event, cpuctx, *can_add_hw)) {
+		int ret = group_sched_in(event, cpuctx, ctx);
 		if (ret) {
-			sid->can_add_hw = 0;
-			sid->ctx->rotate_necessary = 1;
+			*can_add_hw = 0;
+			ctx->rotate_necessary = 1;
 			return 0;
 		}
-		list_add_tail(&event->active_list, &sid->ctx->flexible_active);
+		list_add_tail(&event->active_list, &ctx->flexible_active);
 	}
 
 	return 0;
 }
 
-static void
-ctx_pinned_sched_in(struct perf_event_context *ctx,
-		    struct perf_cpu_context *cpuctx)
-{
-	struct sched_in_data sid = {
-		.ctx = ctx,
-		.cpuctx = cpuctx,
-		.can_add_hw = 1,
-	};
-
-	if (ctx != &cpuctx->ctx)
-		cpuctx = NULL;
-
-	visit_groups_merge(cpuctx, &ctx->pinned_groups,
-			   smp_processor_id(),
-			   pinned_sched_in, &sid);
-}
-
-static void
-ctx_flexible_sched_in(struct perf_event_context *ctx,
-		      struct perf_cpu_context *cpuctx)
-{
-	struct sched_in_data sid = {
-		.ctx = ctx,
-		.cpuctx = cpuctx,
-		.can_add_hw = 1,
-	};
-
-	if (ctx != &cpuctx->ctx)
-		cpuctx = NULL;
-
-	visit_groups_merge(cpuctx, &ctx->flexible_groups,
-			   smp_processor_id(),
-			   flexible_sched_in, &sid);
-}
-
 static void
 ctx_sched_in(struct perf_event_context *ctx,
 	     struct perf_cpu_context *cpuctx,
@@ -3702,11 +3677,12 @@ ctx_sched_in(struct perf_event_context *ctx,
 	 * in order to give them the best chance of going on.
 	 */
 	if (is_active & EVENT_PINNED)
-		ctx_pinned_sched_in(ctx, cpuctx);
+		ctx_groups_sched_in(ctx, cpuctx, /*is_pinned=*/true);
+
 
 	/* Then walk through the lower prio flexible groups */
 	if (is_active & EVENT_FLEXIBLE)
-		ctx_flexible_sched_in(ctx, cpuctx);
+		ctx_groups_sched_in(ctx, cpuctx, /*is_pinned=*/false);
 }
 
 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
-- 
2.24.0.432.g9d3f5f5b63-goog
