Date:	Tue,  4 May 2010 14:38:41 +0200
From:	Tejun Heo <tj@...nel.org>
To:	mingo@...e.hu, peterz@...radead.org, efault@....de, avi@...hat.com,
	paulus@...ba.org, acme@...hat.com, linux-kernel@...r.kernel.org
Cc:	Tejun Heo <tj@...nel.org>, Peter Zijlstra <a.p.zijlstra@...llo.nl>
Subject: [PATCH 09/12] perf: factor out perf_event_switch_clones()

Factor out perf_event_switch_clones() from
perf_event_task_sched_out().  This eases future changes and causes
no functional difference.
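
As an aside, a minimal standalone sketch of the same transformation
(illustrative only; the struct and function names below are hypothetical,
not kernel code): a caller that previously tracked a do_switch flag is
split into a bool-returning helper plus an early return in the caller,
which keeps the behavior identical.

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for the perf context state. */
	struct ctx {
		int can_swap;
	};

	/*
	 * Helper factored out of the caller; returns true when the
	 * contexts were swapped, mirroring perf_event_switch_clones().
	 */
	static bool try_switch(struct ctx *c)
	{
		if (c->can_swap) {
			/* ... swap contexts ... */
			return true;
		}
		return false;
	}

	/*
	 * Caller after the refactor: early-return when the helper
	 * switched, otherwise fall through to the schedule-out path,
	 * exactly like the old "if (do_switch)" branch.
	 */
	static void sched_out(struct ctx *c)
	{
		if (try_switch(c))
			return;
		printf("schedule out\n");
	}

	int main(void)
	{
		struct ctx a = { .can_swap = 0 };
		struct ctx b = { .can_swap = 1 };

		sched_out(&a);	/* prints "schedule out" */
		sched_out(&b);	/* helper switched; nothing printed */
		return 0;
	}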

Signed-off-by: Tejun Heo <tj@...nel.org>
Cc: Peter Zijlstra <a.p.zijlstra@...llo.nl>
Cc: Paul Mackerras <paulus@...ba.org>
Cc: Ingo Molnar <mingo@...e.hu>
Cc: Arnaldo Carvalho de Melo <acme@...hat.com>
---
 kernel/perf_event.c |   62 +++++++++++++++++++++++++++++---------------------
 1 files changed, 36 insertions(+), 26 deletions(-)

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 295699f..3f3e328 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1158,30 +1158,14 @@ void perf_event_task_migrate(struct task_struct *task, int new_cpu)
 		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
 }
 
-/*
- * Called from scheduler to remove the events of the current task,
- * with interrupts disabled.
- *
- * We stop each event and update the event value in event->count.
- *
- * This does not protect us against NMI, but disable()
- * sets the disabled bit in the control field of event _before_
- * accessing the event control register. If a NMI hits, then it will
- * not restart the event.
- */
-void perf_event_task_sched_out(struct rq *rq, struct task_struct *task,
-			       struct task_struct *next)
+static bool perf_event_switch_clones(struct perf_cpu_context *cpuctx,
+				     struct perf_event_context *ctx,
+				     struct task_struct *task,
+				     struct task_struct *next)
 {
-	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-	struct perf_event_context *ctx = task->perf_event_ctxp;
 	struct perf_event_context *next_ctx;
 	struct perf_event_context *parent;
-	int do_switch = 1;
-
-	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
-
-	if (likely(!ctx || !cpuctx->task_ctx))
-		return;
+	bool switched = false;
 
 	rcu_read_lock();
 	parent = rcu_dereference(ctx->parent_ctx);
@@ -1208,7 +1192,7 @@ void perf_event_task_sched_out(struct rq *rq, struct task_struct *task,
 			next->perf_event_ctxp = ctx;
 			ctx->task = next;
 			next_ctx->task = task;
-			do_switch = 0;
+			switched = true;
 
 			perf_event_sync_stat(ctx, next_ctx);
 		}
@@ -1217,10 +1201,36 @@ void perf_event_task_sched_out(struct rq *rq, struct task_struct *task,
 	}
 	rcu_read_unlock();
 
-	if (do_switch) {
-		ctx_sched_out(ctx, cpuctx, EVENT_ALL);
-		cpuctx->task_ctx = NULL;
-	}
+	return switched;
+}
+
+/*
+ * Called from scheduler to remove the events of the current task,
+ * with interrupts disabled.
+ *
+ * We stop each event and update the event value in event->count.
+ *
+ * This does not protect us against NMI, but disable()
+ * sets the disabled bit in the control field of event _before_
+ * accessing the event control register. If a NMI hits, then it will
+ * not restart the event.
+ */
+void perf_event_task_sched_out(struct rq *rq, struct task_struct *task,
+			       struct task_struct *next)
+{
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+	struct perf_event_context *ctx = task->perf_event_ctxp;
+
+	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
+
+	if (likely(!ctx || !cpuctx->task_ctx))
+		return;
+
+	if (perf_event_switch_clones(cpuctx, ctx, task, next))
+		return;
+
+	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
+	cpuctx->task_ctx = NULL;
 }
 
 static void task_ctx_sched_out(struct perf_event_context *ctx,
-- 
1.6.4.2
