lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:	Sun,  8 Nov 2009 21:13:25 +0100
From:	Frederic Weisbecker <fweisbec@...il.com>
To:	Ingo Molnar <mingo@...e.hu>
Cc:	LKML <linux-kernel@...r.kernel.org>,
	Frederic Weisbecker <fweisbec@...il.com>,
	Peter Zijlstra <peterz@...radead.org>,
	Arnaldo Carvalho de Melo <acme@...hat.com>,
	Mike Galbraith <efault@....de>,
	Paul Mackerras <paulus@...ba.org>,
	Thomas Gleixner <tglx@...utronix.de>
Subject: [RFC PATCH 3/4] perf/core: Split up pinned and non pinned processing

Split up pinned and non-pinned events processing into two helpers
so that it's more flexible to handle them separately.

Signed-off-by: Frederic Weisbecker <fweisbec@...il.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Arnaldo Carvalho de Melo <acme@...hat.com>
Cc: Mike Galbraith <efault@....de>
Cc: Paul Mackerras <paulus@...ba.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
---
 kernel/perf_event.c |   51 +++++++++++++++++++++++++++++++++++----------------
 1 files changed, 35 insertions(+), 16 deletions(-)

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 0432c1c..50f2997 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1246,25 +1246,11 @@ static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx)
 }
 
 static void
-__perf_event_sched_in(struct perf_event_context *ctx,
-			struct perf_cpu_context *cpuctx, int cpu)
+__perf_event_sched_in_pinned(struct perf_event_context *ctx,
+			     struct perf_cpu_context *cpuctx, int cpu)
 {
 	struct perf_event *event;
-	int can_add_hw = 1;
-
-	spin_lock(&ctx->lock);
-	ctx->is_active = 1;
-	if (likely(!ctx->nr_events))
-		goto out;
-
-	ctx->timestamp = perf_clock();
-
-	perf_disable();
 
-	/*
-	 * First go through the list and put on any pinned groups
-	 * in order to give them the best chance of going on.
-	 */
 	list_for_each_entry(event, &ctx->pinned_grp_list, group_entry) {
 		if (event->state <= PERF_EVENT_STATE_OFF)
 			continue;
@@ -1283,6 +1269,14 @@ __perf_event_sched_in(struct perf_event_context *ctx,
 			event->state = PERF_EVENT_STATE_ERROR;
 		}
 	}
+}
+
+static void
+__perf_event_sched_in_volatile(struct perf_event_context *ctx,
+			       struct perf_cpu_context *cpuctx, int cpu)
+{
+	int can_add_hw = 1;
+	struct perf_event *event;
 
 	list_for_each_entry(event, &ctx->volatile_grp_list, group_entry) {
 		/*
@@ -1303,6 +1297,31 @@ __perf_event_sched_in(struct perf_event_context *ctx,
 			if (group_sched_in(event, cpuctx, ctx, cpu))
 				can_add_hw = 0;
 	}
+}
+
+static void
+__perf_event_sched_in(struct perf_event_context *ctx,
+			struct perf_cpu_context *cpuctx, int cpu)
+{
+	spin_lock(&ctx->lock);
+	ctx->is_active = 1;
+	if (likely(!ctx->nr_events))
+		goto out;
+
+	ctx->timestamp = perf_clock();
+
+	perf_disable();
+
+	/*
+	 * First go through the list and put on any pinned groups
+	 * in order to give them the best chance of going on.
+	 */
+	__perf_event_sched_in_pinned(ctx, cpuctx, cpu);
+
+	/* Then handle the non-pinned groups */
+	__perf_event_sched_in_volatile(ctx, cpuctx, cpu);
+
+
 	perf_enable();
  out:
 	spin_unlock(&ctx->lock);
-- 
1.6.2.3

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists