Message-ID: <161459377663.20312.5723191182758095277.tip-bot2@tip-bot2>
Date: Mon, 01 Mar 2021 10:16:16 -0000
From: "tip-bot2 for Peter Zijlstra" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: stable@...nel.org, "Peter Zijlstra (Intel)" <peterz@...radead.org>,
Valentin Schneider <valentin.schneider@....com>,
x86@...nel.org, linux-kernel@...r.kernel.org
Subject: [tip: sched/urgent] sched: Fix affine_move_task() self-concurrency
The following commit has been merged into the sched/urgent branch of tip:
Commit-ID: de8115ef5c83ef2c9941684019d59f4c2e5d16ce
Gitweb: https://git.kernel.org/tip/de8115ef5c83ef2c9941684019d59f4c2e5d16ce
Author: Peter Zijlstra <peterz@...radead.org>
AuthorDate: Wed, 24 Feb 2021 11:31:09 +01:00
Committer: Peter Zijlstra <peterz@...radead.org>
CommitterDate: Mon, 01 Mar 2021 11:02:14 +01:00
sched: Fix affine_move_task() self-concurrency
Consider two concurrent calls:

   sched_setaffinity(p, X);		sched_setaffinity(p, Y);
Then the first call installs p->migration_pending = &my_pending and issues
stop_one_cpu_nowait(pending); the second reads p->migration_pending and
_also_ issues stop_one_cpu_nowait(pending) on the _SAME_ @pending.

This causes stopper list corruption: the single cpu_stop_work embedded in
@pending ends up on the per-CPU stopper work list twice.
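
For illustration only (not part of the patch): a minimal user-space sketch of
that corruption, assuming a simplified list_add_tail() in the style of the
kernel's list helpers; struct toy_pending, stopper_works and the other names
are hypothetical stand-ins for set_affinity_pending/cpu_stop_work and the
per-CPU stopper queue.

/*
 * Illustrative user-space sketch only: queueing the same embedded work
 * item twice leaves the queue self-linked and unwalkable.
 */
#include <stdio.h>

struct list_node {
	struct list_node *prev, *next;
};

/* hypothetical stand-in for the cpu_stop_work embedded in @pending */
struct toy_pending {
	struct list_node stop_work;
};

static void list_init(struct list_node *head)
{
	head->prev = head->next = head;
}

static void list_add_tail(struct list_node *new, struct list_node *head)
{
	struct list_node *prev = head->prev;

	head->prev = new;
	new->next = head;
	new->prev = prev;
	prev->next = new;
}

int main(void)
{
	struct list_node stopper_works;		/* per-CPU stopper queue */
	struct toy_pending pending;

	list_init(&stopper_works);

	/* first sched_setaffinity(): queues &pending once, fine */
	list_add_tail(&pending.stop_work, &stopper_works);
	/* second sched_setaffinity(): queues the _SAME_ @pending again */
	list_add_tail(&pending.stop_work, &stopper_works);

	/*
	 * The node now links to itself: walking the queue forward from
	 * the head reaches pending.stop_work and then loops on it
	 * forever, never returning to the head. The list is corrupt.
	 */
	printf("self-linked: %d\n",
	       pending.stop_work.next == &pending.stop_work);
	return 0;
}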
Add set_affinity_pending::stop_pending, to indicate if a stopper is in
progress.
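
As a hedged sketch of the resulting pattern (a user-space model, not the
kernel code: a pthread mutex stands in for the task's rq lock, toy_pending
and queue_stopper_work are hypothetical names, and the stopper side that
clears the flag again at the 'out:' label is omitted), only the caller that
flips stop_pending from false to true queues the work:

/*
 * Illustrative user-space model of the fix, not kernel code.
 * The mutex models task_rq_lock(); stop_pending models
 * set_affinity_pending::stop_pending.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_pending {
	pthread_mutex_t lock;		/* stands in for the rq lock */
	bool stop_pending;		/* a stopper is already queued */
	int queued;			/* times the stop work was queued */
};

static void queue_stopper_work(struct toy_pending *p)
{
	/* stands in for stop_one_cpu_nowait(..., &pending->stop_work) */
	p->queued++;
}

static void *set_affinity(void *arg)
{
	struct toy_pending *p = arg;
	bool stop_pending;

	pthread_mutex_lock(&p->lock);
	stop_pending = p->stop_pending;
	if (!stop_pending)
		p->stop_pending = true;	/* claim the (single) stopper */
	pthread_mutex_unlock(&p->lock);

	if (!stop_pending)
		queue_stopper_work(p);	/* only the first caller queues */
	return NULL;
}

int main(void)
{
	struct toy_pending p = { .stop_pending = false, .queued = 0 };
	pthread_t t1, t2;

	pthread_mutex_init(&p.lock, NULL);
	pthread_create(&t1, NULL, set_affinity, &p);
	pthread_create(&t2, NULL, set_affinity, &p);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	printf("stop work queued %d time(s)\n", p.queued);	/* always 1 */
	return 0;
}

Both racing callers see the same pending object, but the flag is tested and
set under the lock, so queue_stopper_work() runs exactly once. The real
patch below does the same test-and-set under the rq lock and calls
stop_one_cpu_nowait() only when stop_pending was not already set.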
Fixes: 6d337eab041d ("sched: Fix migrate_disable() vs set_cpus_allowed_ptr()")
Cc: stable@...nel.org
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@....com>
Link: https://lkml.kernel.org/r/20210224131355.649146419@infradead.org
---
kernel/sched/core.c | 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ac05afb..4e4d100 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1864,6 +1864,7 @@ struct migration_arg {
struct set_affinity_pending {
refcount_t refs;
+ unsigned int stop_pending;
struct completion done;
struct cpu_stop_work stop_work;
struct migration_arg arg;
@@ -1982,12 +1983,15 @@ static int migration_cpu_stop(void *data)
* determine is_migration_disabled() and so have to chase after
* it.
*/
+ WARN_ON_ONCE(!pending->stop_pending);
task_rq_unlock(rq, p, &rf);
stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
&pending->arg, &pending->stop_work);
return 0;
}
out:
+ if (pending)
+ pending->stop_pending = false;
task_rq_unlock(rq, p, &rf);
if (complete)
@@ -2183,7 +2187,7 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
int dest_cpu, unsigned int flags)
{
struct set_affinity_pending my_pending = { }, *pending = NULL;
- bool complete = false;
+ bool stop_pending, complete = false;
/* Can the task run on the task's current CPU? If so, we're done */
if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
@@ -2256,14 +2260,19 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
* anything else we cannot do is_migration_disabled(), punt
* and have the stopper function handle it all race-free.
*/
+ stop_pending = pending->stop_pending;
+ if (!stop_pending)
+ pending->stop_pending = true;
refcount_inc(&pending->refs); /* pending->{arg,stop_work} */
if (flags & SCA_MIGRATE_ENABLE)
p->migration_flags &= ~MDF_PUSH;
task_rq_unlock(rq, p, rf);
- stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
- &pending->arg, &pending->stop_work);
+ if (!stop_pending) {
+ stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
+ &pending->arg, &pending->stop_work);
+ }
if (flags & SCA_MIGRATE_ENABLE)
return 0;