Message-ID: <1296594038.26581.304.camel@laptop>
Date: Tue, 01 Feb 2011 22:00:38 +0100
From: Peter Zijlstra <peterz@...radead.org>
To: Oleg Nesterov <oleg@...hat.com>
Cc: Frederic Weisbecker <fweisbec@...il.com>,
Ingo Molnar <mingo@...e.hu>,
Alan Stern <stern@...land.harvard.edu>,
Arnaldo Carvalho de Melo <acme@...hat.com>,
Paul Mackerras <paulus@...ba.org>,
Prasad <prasad@...ux.vnet.ibm.com>,
Roland McGrath <roland@...hat.com>,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH] perf: Cure task_oncpu_function_call() races
On Tue, 2011-02-01 at 19:08 +0100, Peter Zijlstra wrote:
> perf_install_in_context() works on a ctx obtained by find_get_context();
> that context is either new (uncloned) or existing, in which case
> find_get_context() called unclone_ctx() on it. So I was thinking there
> was no race with the ctx flipping in perf_event_context_sched_out(),
> _however_ since ctx->mutex is only acquired after unclone_ctx() runs,
> there is a race window with perf_event_init_task().
>
> This race we should fix with perf_pin_task_context()
I came up with the below... I'll give it some runtime tomorrow; my brain
just gave up for today.
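
To make the new rules easy to check, here is a minimal userspace model of
the refcount/pin discipline the patch establishes: find_get_context()
returns the context with both a reference and a pin held, and
perf_unpin_context() now drops only the pin, so whoever is done with the
context must also put_ctx() it explicitly. This is only a sketch, not the
kernel code: the names mirror the kernel's, but a plain pthread mutex
stands in for ctx->lock, refcount is a plain int rather than an atomic_t,
and put_ctx() prints instead of doing the RCU-deferred kfree().

/* Build: gcc -pthread pin-model.c */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct perf_event_context {
        pthread_mutex_t lock;   /* stands in for ctx->lock          */
        int refcount;           /* lifetime; context freed at zero  */
        int pin_count;          /* while nonzero, ctx may not swap  */
};

static void get_ctx(struct perf_event_context *ctx)
{
        pthread_mutex_lock(&ctx->lock);
        ++ctx->refcount;
        pthread_mutex_unlock(&ctx->lock);
}

static void put_ctx(struct perf_event_context *ctx)
{
        int last;

        pthread_mutex_lock(&ctx->lock);
        last = (--ctx->refcount == 0);
        pthread_mutex_unlock(&ctx->lock);
        if (last)
                printf("ctx freed\n"); /* RCU-deferred kfree() in the kernel */
}

/* Post-patch behaviour: drop the pin only, no implicit put_ctx(). */
static void perf_unpin_context(struct perf_event_context *ctx)
{
        pthread_mutex_lock(&ctx->lock);
        --ctx->pin_count;
        pthread_mutex_unlock(&ctx->lock);
}

/* Post-patch contract: return with a reference _and_ a pin held. */
static void find_get_context(struct perf_event_context *ctx)
{
        get_ctx(ctx);
        pthread_mutex_lock(&ctx->lock);
        ++ctx->pin_count;
        pthread_mutex_unlock(&ctx->lock);
}

int main(void)
{
        struct perf_event_context ctx = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .refcount = 1,          /* the task's own reference   */
        };

        /* the perf_event_open() pattern after the patch: */
        find_get_context(&ctx);
        /* ... perf_install_in_context(), ++ctx->generation ... */
        perf_unpin_context(&ctx);       /* pin gone, ref stays with event */
        assert(ctx.pin_count == 0 && ctx.refcount == 2);

        put_ctx(&ctx);                  /* event teardown drops its ref */
        put_ctx(&ctx);                  /* last ref gone, ctx is freed  */
        return 0;
}

The point the assert makes: after perf_event_open() the context is no
longer pinned but still has two references, one from the event and one
from the task, which is exactly what the hunks below arrange.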
---
Index: linux-2.6/kernel/perf_event.c
===================================================================
--- linux-2.6.orig/kernel/perf_event.c
+++ linux-2.6/kernel/perf_event.c
@@ -327,7 +327,6 @@ static void perf_unpin_context(struct pe
raw_spin_lock_irqsave(&ctx->lock, flags);
--ctx->pin_count;
raw_spin_unlock_irqrestore(&ctx->lock, flags);
- put_ctx(ctx);
}
/*
@@ -741,10 +740,10 @@ static void perf_remove_from_context(str
raw_spin_lock_irq(&ctx->lock);
/*
- * If we failed to find a running task, but find it running now that
- * we've acquired the ctx->lock, retry.
+ * If we failed to find a running task, but find the context active now
+ * that we've acquired the ctx->lock, retry.
*/
- if (task_curr(task)) {
+ if (ctx->is_active) {
raw_spin_unlock_irq(&ctx->lock);
goto retry;
}
@@ -1104,10 +1103,10 @@ perf_install_in_context(struct perf_even
raw_spin_lock_irq(&ctx->lock);
/*
- * If we failed to find a running task, but find it running now that
- * we've acquired the ctx->lock, retry.
+ * If we failed to find a running task, but find the context active now
+ * that we've acquired the ctx->lock, retry.
*/
- if (task_curr(task)) {
+ if (ctx->is_active) {
raw_spin_unlock_irq(&ctx->lock);
goto retry;
}
@@ -2278,6 +2277,9 @@ find_lively_task_by_vpid(pid_t vpid)
}
+/*
+ * Returns a matching context with refcount and pincount.
+ */
static struct perf_event_context *
find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
{
@@ -2302,6 +2304,7 @@ find_get_context(struct pmu *pmu, struct
cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
ctx = &cpuctx->ctx;
get_ctx(ctx);
+ ++ctx->pin_count;
return ctx;
}
@@ -2315,6 +2318,7 @@ find_get_context(struct pmu *pmu, struct
ctx = perf_lock_task_context(task, ctxn, &flags);
if (ctx) {
unclone_ctx(ctx);
+ ++ctx->pin_count;
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
@@ -6041,6 +6045,7 @@ SYSCALL_DEFINE5(perf_event_open,
perf_install_in_context(ctx, event, cpu);
++ctx->generation;
+ perf_unpin_context(ctx);
mutex_unlock(&ctx->mutex);
event->owner = current;
@@ -6066,6 +6071,7 @@ SYSCALL_DEFINE5(perf_event_open,
return event_fd;
err_context:
+ perf_unpin_context(ctx);
put_ctx(ctx);
err_alloc:
free_event(event);
@@ -6116,6 +6122,7 @@ perf_event_create_kernel_counter(struct
mutex_lock(&ctx->mutex);
perf_install_in_context(ctx, event, cpu);
++ctx->generation;
+ perf_unpin_context(ctx);
mutex_unlock(&ctx->mutex);
return event;
@@ -6591,6 +6598,7 @@ int perf_event_init_context(struct task_
mutex_unlock(&parent_ctx->mutex);
perf_unpin_context(parent_ctx);
+ put_ctx(parent_ctx);
return ret;
}