Message-ID: <20151017012811.GA18581@thinkpad>
Date: Sat, 17 Oct 2015 03:28:11 +0200
From: Stephane Eranian <eranian@...gle.com>
To: linux-kernel@...r.kernel.org
Cc: peterz@...radead.org, mingo@...e.hu, ak@...ux.intel.com,
edumazet@...gle.com, acme@...hat.com
Subject: [PATCH] perf/core: fix RCU issues with cgroup monitoring mode

This patch eliminates all known RCU violations detected by the
RCU checker (PROVE_RCU). The impacted code paths were all related
to cgroup monitoring mode and involved accessing a task's cgrp.
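
For illustration, here is roughly what update_cgrp_time_from_event()
looks like with the fix applied. This is a sketch reconstructed from
the first hunk below; the function opening and the comments are
paraphrased and may not match the tree verbatim:

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
	struct perf_cgroup *cgrp;

	/* nothing to do unless this is a cgroup event */
	if (!is_cgroup_event(event))
		return;

	/* hold the RCU read lock across the cgroup pointer dereference */
	rcu_read_lock();

	cgrp = perf_cgroup_from_task(current);

	/* do not update time when the cgroup is not active */
	if (cgrp == event->cgrp)
		__update_cgrp_time(event->cgrp);

	rcu_read_unlock();
}

The remaining hunks apply the same rcu_read_lock()/rcu_read_unlock()
pairing around the other cgrp accesses, and drop the lock/unlock pair
inside perf_cgroup_switch() since its callers now hold the RCU read
lock.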
Patch is relative to tip.git at commit:
f8e9941 Merge branch 'x86/urgent'
Signed-off-by: Stephane Eranian <eranian@...gle.com>
---
kernel/events/core.c | 23 ++++++++++++++++++++---
1 file changed, 20 insertions(+), 3 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index ea02109..65c4ffa 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -435,12 +435,16 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
 	if (!is_cgroup_event(event))
 		return;
 
+	rcu_read_lock();
+
 	cgrp = perf_cgroup_from_task(current);
 	/*
 	 * Do not update time when cgroup is not active
 	 */
 	if (cgrp == event->cgrp)
 		__update_cgrp_time(event->cgrp);
+
+	rcu_read_unlock();
 }
 
 static inline void
@@ -458,9 +462,11 @@ perf_cgroup_set_timestamp(struct task_struct *task,
 	if (!task || !ctx->nr_cgroups)
 		return;
 
+	rcu_read_lock();
 	cgrp = perf_cgroup_from_task(task);
 	info = this_cpu_ptr(cgrp->info);
 	info->timestamp = ctx->timestamp;
+	rcu_read_unlock();
 }
 
 #define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
@@ -489,7 +495,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
 	 * we reschedule only in the presence of cgroup
 	 * constrained events.
 	 */
-	rcu_read_lock();
 
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
@@ -531,8 +536,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
 		}
 	}
 
-	rcu_read_unlock();
-
 	local_irq_restore(flags);
 }
 
@@ -542,6 +545,7 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
 	struct perf_cgroup *cgrp1;
 	struct perf_cgroup *cgrp2 = NULL;
 
+	rcu_read_lock();
 	/*
 	 * we come here when we know perf_cgroup_events > 0
 	 */
@@ -561,6 +565,8 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
 	 */
 	if (cgrp1 != cgrp2)
 		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
+
+	rcu_read_unlock();
 }
 
 static inline void perf_cgroup_sched_in(struct task_struct *prev,
@@ -569,6 +575,7 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
 	struct perf_cgroup *cgrp1;
 	struct perf_cgroup *cgrp2 = NULL;
 
+	rcu_read_lock();
 	/*
 	 * we come here when we know perf_cgroup_events > 0
 	 */
@@ -577,6 +584,7 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
 
 	/* prev can never be NULL */
 	cgrp2 = perf_cgroup_from_task(prev);
+
 	/*
 	 * only need to schedule in cgroup events if we are changing
 	 * cgroup during ctxsw. Cgroup events were not scheduled
@@ -584,6 +592,8 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
 	 */
 	if (cgrp1 != cgrp2)
 		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
+
+	rcu_read_unlock();
 }
 
 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
@@ -2094,6 +2104,7 @@ static int __perf_install_in_context(void *info)
 		cpuctx->task_ctx = task_ctx;
 		task = task_ctx->task;
 	}
+	rcu_read_lock();
 
 	cpu_ctx_sched_out(cpuctx, EVENT_ALL);
 
@@ -2112,6 +2123,8 @@ static int __perf_install_in_context(void *info)
 	 */
 	perf_event_sched_in(cpuctx, task_ctx, task);
 
+	rcu_read_unlock();
+
 	perf_pmu_enable(cpuctx->ctx.pmu);
 	perf_ctx_unlock(cpuctx, task_ctx);
 
@@ -2398,7 +2411,9 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 		return;
 
 	update_context_time(ctx);
+	rcu_read_lock();
 	update_cgrp_time_from_cpuctx(cpuctx);
+	rcu_read_unlock();
 	if (!ctx->nr_active)
 		return;
 
@@ -9442,7 +9457,9 @@ static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
 static int __perf_cgroup_move(void *info)
 {
 	struct task_struct *task = info;
+	rcu_read_lock();
 	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
+	rcu_read_unlock();
 	return 0;
 }
 
--
2.1.0