Message-ID: <20230929102135.GD6282@noisy.programming.kicks-ass.net>
Date: Fri, 29 Sep 2023 12:21:35 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: Kuyo Chang (張建文) <Kuyo.Chang@...iatek.com>
Cc: "dietmar.eggemann@....com" <dietmar.eggemann@....com>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>,
"linux-mediatek@...ts.infradead.org"
<linux-mediatek@...ts.infradead.org>,
"rostedt@...dmis.org" <rostedt@...dmis.org>,
wsd_upstream <wsd_upstream@...iatek.com>,
"vschneid@...hat.com" <vschneid@...hat.com>,
"bristot@...hat.com" <bristot@...hat.com>,
"juri.lelli@...hat.com" <juri.lelli@...hat.com>,
"mingo@...hat.com" <mingo@...hat.com>,
"linux-arm-kernel@...ts.infradead.org"
<linux-arm-kernel@...ts.infradead.org>,
"bsegall@...gle.com" <bsegall@...gle.com>,
"mgorman@...e.de" <mgorman@...e.de>,
"matthias.bgg@...il.com" <matthias.bgg@...il.com>,
"vincent.guittot@...aro.org" <vincent.guittot@...aro.org>,
"angelogioacchino.delregno@...labora.com"
<angelogioacchino.delregno@...labora.com>
Subject: Re: [PATCH 1/1] sched/core: Fix stuck on completion for
affine_move_task() when stopper disable

On Wed, Sep 27, 2023 at 03:57:35PM +0000, Kuyo Chang (張建文) wrote:
> This issue occurs during a CPU hotplug/set_affinity stress test.
> The reproduction rate is very low (about once a week).
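
(A stress test of that shape -- an illustrative sketch only, not the
reporter's actual test; the hot-plugged CPU number and the sysfs path
are assumptions -- could be as small as:)

/*
 * hotplug vs sched_setaffinity() stress: one thread cycles cpu1
 * offline/online via sysfs while the main thread bounces its own
 * affinity between cpu0 and cpu1. Run as root. Build: gcc -pthread.
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static void write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (f) {
		fputs(val, f);
		fclose(f);
	}
}

static void *hotplug_loop(void *unused)
{
	for (;;) {
		write_str("/sys/devices/system/cpu/cpu1/online", "0");
		write_str("/sys/devices/system/cpu/cpu1/online", "1");
	}
	return NULL;
}

int main(void)
{
	pthread_t thr;
	cpu_set_t set;
	int cpu = 0;

	pthread_create(&thr, NULL, hotplug_loop, NULL);
	for (;;) {
		CPU_ZERO(&set);
		CPU_SET(cpu, &set);
		/* may fail with EINVAL while cpu1 is offline; keep going */
		sched_setaffinity(0, sizeof(set), &set);
		cpu ^= 1;	/* alternate cpu0 <-> cpu1 */
	}
}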

I'm assuming you're running an arm64 kernel with full preemption
(CONFIG_PREEMPT=y, the default for arm64).

In that case the task can be preempted between task_rq_unlock() and
stop_one_cpu_nowait() -- notably by its own CPU's stopper thread taking
part in the hotplug stop_machine(). The CPU-down then completes and
parks the target CPU's stopper, after which cpu_stop_queue_work() finds
stopper->enabled false and silently discards the work:
migration_cpu_stop() never runs and the completion is never signalled.
Disabling preemption across the unlock + queue closes that window.

Could you please test the below?

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d8fd29d66b24..079a63b8a954 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2645,9 +2645,11 @@ static int migration_cpu_stop(void *data)
 		 * it.
 		 */
 		WARN_ON_ONCE(!pending->stop_pending);
+		preempt_disable();
 		task_rq_unlock(rq, p, &rf);
 		stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
				    &pending->arg, &pending->stop_work);
+		preempt_enable();
 		return 0;
 	}
 out:
@@ -2967,12 +2969,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
 			complete = true;
 		}
 
+		preempt_disable();
 		task_rq_unlock(rq, p, rf);
-
 		if (push_task) {
 			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
					    p, &rq->push_work);
 		}
+		preempt_enable();
 
 		if (complete)
 			complete_all(&pending->done);
@@ -3038,12 +3041,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
 		if (flags & SCA_MIGRATE_ENABLE)
 			p->migration_flags &= ~MDF_PUSH;
 
+		preempt_disable();
 		task_rq_unlock(rq, p, rf);
-
 		if (!stop_pending) {
 			stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
					    &pending->arg, &pending->stop_work);
 		}
+		preempt_enable();
 
 		if (flags & SCA_MIGRATE_ENABLE)
 			return 0;
@@ -9459,6 +9461,7 @@ static void balance_push(struct rq *rq)
 	 * Temporarily drop rq->lock such that we can wake-up the stop task.
 	 * Both preemption and IRQs are still disabled.
 	 */
+	preempt_disable();
 	raw_spin_rq_unlock(rq);
 	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
			    this_cpu_ptr(&push_work));
@@ -9468,6 +9471,7 @@ static void balance_push(struct rq *rq)
 	 * which kthread_is_per_cpu() and will push this task away.
 	 */
 	raw_spin_rq_lock(rq);
+	preempt_enable();
 }
 
 static void balance_push_set(int cpu, bool on)
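
All five hunks apply the same shape of change; as an illustrative
sketch (kernel-style excerpt using the identifiers from the first hunk
above, not additional patched code):

	preempt_disable();		/* our stopper can't preempt us now */
	task_rq_unlock(rq, p, &rf);	/* drop rq->lock, stay non-preemptible */
	stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
			    &pending->arg, &pending->stop_work);
	preempt_enable();

While preemption is disabled, this CPU's stopper cannot run, so the
hotplug stop_machine() (which needs every online CPU's stopper to
participate) cannot reach the point where it parks the target CPU's
stopper. The work is therefore queued before stopper->enabled can go
false, and the completion can no longer get lost.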