Make task_ctx_sched_*() imply EVENT_ALL, since anything less will not
actually have scheduled the task in/out at all.

Since there's no site that schedules all of a task in (due to the
interleave with the flexible cpuctx events) we can remove
task_ctx_sched_in() entirely.

Signed-off-by: Peter Zijlstra
---
 kernel/perf_event.c |   26 ++++++--------------------
 1 file changed, 6 insertions(+), 20 deletions(-)

Index: linux-2.6/kernel/perf_event.c
===================================================================
--- linux-2.6.orig/kernel/perf_event.c
+++ linux-2.6/kernel/perf_event.c
@@ -1986,8 +1986,7 @@ void __perf_event_task_sched_out(struct
 		perf_cgroup_sched_out(task);
 }
 
-static void task_ctx_sched_out(struct perf_event_context *ctx,
-			       enum event_type_t event_type)
+static void task_ctx_sched_out(struct perf_event_context *ctx)
 {
 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 
@@ -1997,7 +1996,7 @@ static void task_ctx_sched_out(struct pe
 	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
 		return;
 
-	ctx_sched_out(ctx, cpuctx, event_type);
+	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
 	cpuctx->task_ctx = NULL;
 }
 
@@ -2105,19 +2104,6 @@ static void cpu_ctx_sched_in(struct perf
 	ctx_sched_in(ctx, cpuctx, event_type, task);
 }
 
-static void task_ctx_sched_in(struct perf_event_context *ctx,
-			      enum event_type_t event_type)
-{
-	struct perf_cpu_context *cpuctx;
-
-	cpuctx = __get_cpu_context(ctx);
-	if (cpuctx->task_ctx == ctx)
-		return;
-
-	ctx_sched_in(ctx, cpuctx, event_type, NULL);
-	cpuctx->task_ctx = ctx;
-}
-
 static void perf_event_context_sched_in(struct perf_event_context *ctx,
 					struct task_struct *task)
 {
@@ -2370,7 +2356,7 @@ static void perf_rotate_context(struct p
 
 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
-		task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
+		ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
 
 	rotate_ctx(&cpuctx->ctx);
 	if (ctx)
@@ -2378,7 +2364,7 @@ static void perf_rotate_context(struct p
 
 	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, current);
 	if (ctx)
-		task_ctx_sched_in(ctx, EVENT_FLEXIBLE);
+		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, current);
 
 done:
 	if (remove)
@@ -2433,7 +2419,7 @@ static void perf_event_enable_on_exec(st
 		goto out;
 
 	raw_spin_lock(&ctx->lock);
-	task_ctx_sched_out(ctx, EVENT_ALL);
+	task_ctx_sched_out(ctx);
 
 	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
 		ret = event_enable_on_exec(event, ctx);
@@ -6788,7 +6774,7 @@ static void perf_event_exit_task_context
 	 * incremented the context's refcount before we do put_ctx below.
 	 */
 	raw_spin_lock(&child_ctx->lock);
-	task_ctx_sched_out(child_ctx, EVENT_ALL);
+	task_ctx_sched_out(child_ctx);
 	child->perf_event_ctxp[ctxn] = NULL;
 	/*
 	 * If this context is a clone; unclone it so it can't get
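
For context when reading the EVENT_ALL argument in the changelog: the
event_type_t flags in kernel/perf_event.c compose as a bitmask, so
EVENT_ALL is simply both halves of a task's events. A minimal sketch of
those definitions, abridged here for illustration:

	enum event_type_t {
		EVENT_FLEXIBLE	= 0x1,	/* events that get rotated/interleaved */
		EVENT_PINNED	= 0x2,	/* events that must stay on the PMU */
		/* a full context switch must cover both classes */
		EVENT_ALL	= EVENT_FLEXIBLE | EVENT_PINNED,
	};

Since task_ctx_sched_out() also clears cpuctx->task_ctx, scheduling out
anything narrower than EVENT_ALL through it would leave the context
marked as unscheduled while part of its events were still programmed;
hard-coding EVENT_ALL makes that invariant explicit, and the rotation
path above now calls ctx_sched_out()/ctx_sched_in() directly for its
EVENT_FLEXIBLE-only work.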