[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <YqdLhrJnJUqONjim@hirez.programming.kicks-ass.net>
Date: Mon, 13 Jun 2022 16:36:54 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: Ravi Bangoria <ravi.bangoria@....com>
Cc: acme@...nel.org, alexander.shishkin@...ux.intel.com,
jolsa@...hat.com, namhyung@...nel.org, songliubraving@...com,
eranian@...gle.com, alexey.budankov@...ux.intel.com,
ak@...ux.intel.com, mark.rutland@....com, megha.dey@...el.com,
frederic@...nel.org, maddy@...ux.ibm.com, irogers@...gle.com,
kim.phillips@....com, linux-kernel@...r.kernel.org,
santosh.shukla@....com
Subject: Re: [RFC v2] perf: Rewrite core context handling
On Mon, Jun 13, 2022 at 04:35:11PM +0200, Peter Zijlstra wrote:
>
>
> Right, so sorry for being incredibly tardy on this. Find below the
> patch fwd ported to something recent.
>
> I'll reply to this with fixes and comments.
You write:
>> A simple perf stat/record/top survives with the patch but machine
>> crashes with first run of perf test (stale cpc->task_epc causing the
>> crash). Lockdep is also screaming a lot :)
> @@ -7669,20 +7877,15 @@ static void perf_event_addr_filters_exec
> void perf_event_exec(void)
> {
> struct perf_event_context *ctx;
> - int ctxn;
> -
> - for_each_task_context_nr(ctxn) {
> - perf_event_enable_on_exec(ctxn);
> - perf_event_remove_on_exec(ctxn);
>
> - rcu_read_lock();
> - ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
> - if (ctx) {
> - perf_iterate_ctx(ctx, perf_event_addr_filters_exec,
> - NULL, true);
> - }
> - rcu_read_unlock();
> + rcu_read_lock();
> + ctx = rcu_dereference(current->perf_event_ctxp);
> + if (ctx) {
> + perf_event_enable_on_exec(ctx);
> + perf_event_remove_on_exec(ctx);
> + perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, true);
> }
> + rcu_read_unlock();
> }
>
> struct remote_output {
The above goes *bang* because perf_event_remove_on_exec() will take a
mutex, which isn't allowed under rcu_read_lock().
The below cures.
---
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4384,8 +4384,6 @@ static void perf_event_remove_on_exec(st
unsigned long flags;
bool modified = false;
- perf_pin_task_context(current);
-
mutex_lock(&ctx->mutex);
if (WARN_ON_ONCE(ctx->task != current))
@@ -4406,13 +4404,11 @@ static void perf_event_remove_on_exec(st
raw_spin_lock_irqsave(&ctx->lock, flags);
if (modified)
clone_ctx = unclone_ctx(ctx);
- --ctx->pin_count;
raw_spin_unlock_irqrestore(&ctx->lock, flags);
unlock:
mutex_unlock(&ctx->mutex);
- put_ctx(ctx);
if (clone_ctx)
put_ctx(clone_ctx);
}
@@ -7878,14 +7874,16 @@ void perf_event_exec(void)
{
struct perf_event_context *ctx;
- rcu_read_lock();
- ctx = rcu_dereference(current->perf_event_ctxp);
- if (ctx) {
- perf_event_enable_on_exec(ctx);
- perf_event_remove_on_exec(ctx);
- perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, true);
- }
- rcu_read_unlock();
+ ctx = perf_pin_task_context(current);
+ if (!ctx)
+ return;
+
+ perf_event_enable_on_exec(ctx);
+ perf_event_remove_on_exec(ctx);
+ perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, true);
+
+ perf_unpin_context(ctx);
+ put_ctx(ctx);
}
struct remote_output {
Powered by blists - more mailing lists