Message-ID: <jhj3637lzdm.mognet@arm.com>
Date: Thu, 24 Sep 2020 20:59:33 +0100
From: Valentin Schneider <valentin.schneider@....com>
To: Peter Zijlstra <peterz@...radead.org>
Cc: tglx@...utronix.de, mingo@...nel.org, linux-kernel@...r.kernel.org,
bigeasy@...utronix.de, qais.yousef@....com, swood@...hat.com,
juri.lelli@...hat.com, vincent.guittot@...aro.org,
dietmar.eggemann@....com, rostedt@...dmis.org, bsegall@...gle.com,
mgorman@...e.de, bristot@...hat.com, vincent.donnefort@....com
Subject: Re: [PATCH 8/9] sched: Fix migrate_disable() vs set_cpus_allowed_ptr()
On 21/09/20 17:36, Peter Zijlstra wrote:
> +struct set_affinity_pending {
> +	refcount_t		refs;
> +	struct completion	done;
> +	struct cpu_stop_work	stop_work;
> +	struct migration_arg	arg;
> +};
> +
> +static int move_task(struct rq *rq, struct rq_flags *rf, struct task_struct *p,
> +		     int dest_cpu, unsigned int flags)
> +{
> +	struct set_affinity_pending my_pending, *pending = NULL;
> +	struct migration_arg arg = {
> +		.task = p,
> +		.dest_cpu = dest_cpu,
> +	};
> +
> +	if (!(flags & SCA_MIGRATE_ENABLE)) {
> +		/* serialized by p->pi_lock */
> +		if (!p->migration_pending) {
> +			refcount_set(&my_pending.refs, 1);
> +			init_completion(&my_pending.done);
> +			p->migration_pending = &my_pending;
> +		} else {
> +			pending = p->migration_pending;
> +			refcount_inc(&pending->refs);
> +		}
> +	}
> +	pending = p->migration_pending;
> +	/*
> +	 * - !MIGRATE_ENABLE:
> +	 *   we'll have installed a pending if there wasn't one already.
> +	 *
> +	 * - MIGRATE_ENABLE:
> +	 *   we're here because the current CPU isn't matching anymore,
> +	 *   the only way that can happen is because of a concurrent
> +	 *   set_cpus_allowed_ptr() call, which should then still be
> +	 *   pending completion.
> +	 *
> +	 * Either way, we really should have a @pending here.
> +	 */
> +	if (WARN_ON_ONCE(!pending))
> +		return -EINVAL;
> +
> +	arg.done = &pending->done;
> +
> +	if (flags & SCA_MIGRATE_ENABLE) {
> +
> +		task_rq_unlock(rq, p, rf);
> +		pending->arg = arg;
> +		stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
> +				    &pending->arg, &pending->stop_work);
> +
> +		return 0;
> +	}
> +
> +	if (task_running(rq, p) || p->state == TASK_WAKING) {
> +
> +		task_rq_unlock(rq, p, rf);
> +		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
> +
Shouldn't we check for is_migration_disabled(p) before doing any of that?
migration_cpu_stop() does check for it; is there anything that prevents us
from acting on it earlier than that?
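Something like the below, maybe? (Entirely untested sketch; the existing
else branch below already does the right thing for a migration-disabled
task, i.e. just unlock and fall through to the wait_for_completion().)

	/* Don't bother the stopper with a task it cannot move */
	if (!is_migration_disabled(p) &&
	    (task_running(rq, p) || p->state == TASK_WAKING)) {

		task_rq_unlock(rq, p, rf);
		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);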
> +	} else {
> +		bool complete = false;
> +
> +		if (!is_migration_disabled(p)) {
> +			if (task_on_rq_queued(p))
> +				rq = move_queued_task(rq, rf, p, dest_cpu);
> +
> +			p->migration_pending = NULL;
> +			complete = true;
> +		}
> +		task_rq_unlock(rq, p, rf);
> +
> +		if (complete)
> +			complete_all(&pending->done);
> +	}
> +
> +	wait_for_completion(&pending->done);
> +
> +	if (refcount_dec_and_test(&pending->refs))
> +		wake_up_var(&pending->refs);
> +
> +	wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs));
> +
> +	return 0;
> +}
> +
>  /*
>   * Change a given task's CPU affinity. Migrate the thread to a
>   * proper CPU and schedule it away if the CPU it's executing on
> @@ -2025,19 +2138,8 @@ static int __set_cpus_allowed_ptr(struct
>  	if (cpumask_test_cpu(task_cpu(p), new_mask))
>  		goto out;
I think this needs to cancel any potentially pending migration request.
Consider a task P0 running on CPU0:
  P0                  P1                                P2

  migrate_disable();
  <preempt>
                      set_cpus_allowed_ptr(P0, CPU1);
                      // waits for completion
                                                        set_cpus_allowed_ptr(P0, CPU0);
                                                        // Already good, no waiting for completion
  <resumes>
  migrate_enable();
  // task_cpu(p) allowed, no move_task()
AIUI, in this scenario P1 would be stuck waiting forever. I *think* this can
be cured by making this function slightly more hideous:
---
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 01113e6f941f..829334f00f7b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2102,6 +2102,8 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 			      u32 flags)
 {
 	const struct cpumask *cpu_valid_mask = cpu_active_mask;
+	struct set_affinity_pending *pending;
+	bool cancel_pending = false;
 	unsigned int dest_cpu;
 	struct rq_flags rf;
 	struct rq *rq;
@@ -2158,14 +2160,20 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 	}
 
 	/* Can the task run on the task's current CPU? If so, we're done */
-	if (cpumask_test_cpu(task_cpu(p), new_mask))
+	if (cpumask_test_cpu(task_cpu(p), new_mask)) {
+		cancel_pending = true;
 		goto out;
+	}
 
 	return move_task(rq, &rf, p, dest_cpu, flags);
 
 out:
+	pending = p->migration_pending;
 	task_rq_unlock(rq, p, &rf);
 
+	if (cancel_pending && pending)
+		complete_all(&pending->done);
+
 	return ret;
 }
---
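One more (untested) thought on the above: p->migration_pending would still
point at the now-completed pending, which lives on the stack of a waiter
that is about to go away. Perhaps it should also be cleared while we still
hold the lock, mirroring what move_task() does before its complete_all():

	/* Can the task run on the task's current CPU? If so, we're done */
	if (cpumask_test_cpu(task_cpu(p), new_mask)) {
		pending = p->migration_pending;
		if (pending) {
			/* Clear before completing, as move_task() does */
			p->migration_pending = NULL;
			cancel_pending = true;
		}
		goto out;
	}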
>
> -	if (task_running(rq, p) || p->state == TASK_WAKING) {
> -		struct migration_arg arg = { p, dest_cpu };
> -		/* Need help from migration thread: drop lock and wait. */
> -		task_rq_unlock(rq, p, &rf);
> -		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
> -		return 0;
> -	} else if (task_on_rq_queued(p)) {
> -		/*
> -		 * OK, since we're going to drop the lock immediately
> -		 * afterwards anyway.
> -		 */
> -		rq = move_queued_task(rq, &rf, p, dest_cpu);
> -	}
> +	return move_task(rq, &rf, p, dest_cpu, flags);
> +
>  out:
>  	task_rq_unlock(rq, p, &rf);
>