Message-ID: <20251124223111.3616950-10-jstultz@google.com>
Date: Mon, 24 Nov 2025 22:31:01 +0000
From: John Stultz <jstultz@...gle.com>
To: LKML <linux-kernel@...r.kernel.org>
Cc: John Stultz <jstultz@...gle.com>, Joel Fernandes <joelagnelf@...dia.com>,
Qais Yousef <qyousef@...alina.io>, Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>, Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>, Dietmar Eggemann <dietmar.eggemann@....com>,
Valentin Schneider <vschneid@...hat.com>, Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>, Zimuzo Ezeozue <zezeozue@...gle.com>, Mel Gorman <mgorman@...e.de>,
Will Deacon <will@...nel.org>, Waiman Long <longman@...hat.com>, Boqun Feng <boqun.feng@...il.com>,
"Paul E. McKenney" <paulmck@...nel.org>, Metin Kaya <Metin.Kaya@....com>,
Xuewen Yan <xuewen.yan94@...il.com>, K Prateek Nayak <kprateek.nayak@....com>,
Thomas Gleixner <tglx@...utronix.de>, Daniel Lezcano <daniel.lezcano@...aro.org>,
Suleiman Souhlal <suleiman@...gle.com>, kuyo chang <kuyo.chang@...iatek.com>, hupu <hupu.gm@...il.com>,
kernel-team@...roid.com
Subject: [PATCH v24 09/11] sched: Have try_to_wake_up() handle
return-migration for PROXY_WAKING case
This patch adds logic so try_to_wake_up() will notice when we are
waking a task whose blocked_on == PROXY_WAKING and, if necessary,
dequeue the task so the wakeup will naturally return-migrate the
donor task back to a cpu it can run on.
This helps performance, as we do the dequeue and wakeup under the
locks normally taken in try_to_wake_up(), and it avoids having
to do proxy_force_return() from __schedule(), which has to
re-take similar locks and then force another pick-again loop.
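As a rough sketch, the resulting control flow looks like the
following (a condensed paraphrase of the hunks below, not
compilable on its own; locking and accounting details omitted):

  /* Condensed sketch of try_to_wake_up() after this patch: */
  scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
          if (!ttwu_state_match(p, state, &success)) {
                  /*
                   * A TASK_RUNNING task would normally stop here,
                   * but if it is also marked PROXY_WAKING we fall
                   * through so that ttwu_runnable() can evaluate
                   * proxy_needs_return() under the rq lock.
                   */
                  if (!proxy_task_runnable_but_waking(p))
                          break;
          }
          ...
          /*
           * In ttwu_runnable(), proxy_needs_return() clears
           * PROXY_WAKING and, if the task was proxy-migrated away
           * from its wake_cpu, dequeues it via block_task(). The
           * task then looks like a normal blocked task, so the
           * rest of try_to_wake_up() (select_task_rq() +
           * ttwu_queue()) return-migrates it to a cpu it can
           * run on.
           */
          ...
  }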
This was split out from the larger proxy patch and significantly
reworked.
Credits for the original patch go to:
Peter Zijlstra (Intel) <peterz@...radead.org>
Juri Lelli <juri.lelli@...hat.com>
Valentin Schneider <valentin.schneider@....com>
Connor O'Brien <connoro@...gle.com>
Signed-off-by: John Stultz <jstultz@...gle.com>
---
v24:
* Reworked proxy_needs_return() so it's less nested, as suggested
  by K Prateek
* Switched to using block_task() with DEQUEUE_SPECIAL, as suggested
  by K Prateek
* Fixed an edge case to reset wake_cpu when select_task_rq() chooses
  the current rq and we skip set_task_cpu()
Cc: Joel Fernandes <joelagnelf@...dia.com>
Cc: Qais Yousef <qyousef@...alina.io>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Juri Lelli <juri.lelli@...hat.com>
Cc: Vincent Guittot <vincent.guittot@...aro.org>
Cc: Dietmar Eggemann <dietmar.eggemann@....com>
Cc: Valentin Schneider <vschneid@...hat.com>
Cc: Steven Rostedt <rostedt@...dmis.org>
Cc: Ben Segall <bsegall@...gle.com>
Cc: Zimuzo Ezeozue <zezeozue@...gle.com>
Cc: Mel Gorman <mgorman@...e.de>
Cc: Will Deacon <will@...nel.org>
Cc: Waiman Long <longman@...hat.com>
Cc: Boqun Feng <boqun.feng@...il.com>
Cc: "Paul E. McKenney" <paulmck@...nel.org>
Cc: Metin Kaya <Metin.Kaya@....com>
Cc: Xuewen Yan <xuewen.yan94@...il.com>
Cc: K Prateek Nayak <kprateek.nayak@....com>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Daniel Lezcano <daniel.lezcano@...aro.org>
Cc: Suleiman Souhlal <suleiman@...gle.com>
Cc: kuyo chang <kuyo.chang@...iatek.com>
Cc: hupu <hupu.gm@...il.com>
Cc: kernel-team@...roid.com
---
kernel/sched/core.c | 84 +++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 82 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fcf64c4db437e..e4a49c694dcd9 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3697,6 +3697,64 @@ static inline void proxy_set_task_cpu(struct task_struct *p, int cpu)
__set_task_cpu(p, cpu);
p->wake_cpu = wake_cpu;
}
+
+static bool proxy_task_runnable_but_waking(struct task_struct *p)
+{
+ if (!sched_proxy_exec())
+ return false;
+ return (READ_ONCE(p->__state) == TASK_RUNNING &&
+ READ_ONCE(p->blocked_on) == PROXY_WAKING);
+}
+
+static inline struct task_struct *proxy_resched_idle(struct rq *rq);
+
+/*
+ * Checks to see if task p has been proxy-migrated to another rq
+ * and needs to be returned. If so, we deactivate the task here
+ * so that it can be properly woken up on p->wake_cpu
+ * (or whichever cpu select_task_rq() picks at the bottom of
+ * try_to_wake_up()).
+ */
+static inline bool proxy_needs_return(struct rq *rq, struct task_struct *p)
+{
+ if (!sched_proxy_exec())
+ return false;
+
+ guard(raw_spinlock)(&p->blocked_lock);
+
+ /* If task isn't PROXY_WAKING, we don't need to do return migration */
+ if (p->blocked_on != PROXY_WAKING)
+ return false;
+
+ __clear_task_blocked_on(p, PROXY_WAKING);
+
+ /* If already current, we don't need to return-migrate */
+ if (task_current(rq, p))
+ return false;
+
+ /* If wake_cpu is targeting this cpu, don't bother return-migrating */
+ if (p->wake_cpu == cpu_of(rq)) {
+ resched_curr(rq);
+ return false;
+ }
+
+ /* If we're return migrating the rq->donor, switch it out for idle */
+ if (task_current_donor(rq, p))
+ proxy_resched_idle(rq);
+
+ /* (ab)Use DEQUEUE_SPECIAL to ensure task is always blocked here. */
+ block_task(rq, p, DEQUEUE_NOCLOCK | DEQUEUE_SPECIAL);
+ return true;
+}
+#else /* !CONFIG_SCHED_PROXY_EXEC */
+static bool proxy_task_runnable_but_waking(struct task_struct *p)
+{
+ return false;
+}
+static inline bool proxy_needs_return(struct rq *rq, struct task_struct *p)
+{
+ return false;
+}
#endif /* CONFIG_SCHED_PROXY_EXEC */
static void
@@ -3784,6 +3842,8 @@ static int ttwu_runnable(struct task_struct *p, int wake_flags)
update_rq_clock(rq);
if (p->se.sched_delayed)
enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
+ if (proxy_needs_return(rq, p))
+ goto out;
if (!task_on_cpu(rq, p)) {
/*
* When on_rq && !on_cpu the task is preempted, see if
@@ -3794,6 +3854,7 @@ static int ttwu_runnable(struct task_struct *p, int wake_flags)
ttwu_do_wakeup(p);
ret = 1;
}
+out:
__task_rq_unlock(rq, &rf);
return ret;
@@ -4181,6 +4242,8 @@ int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
* it disabling IRQs (this allows not taking ->pi_lock).
*/
WARN_ON_ONCE(p->se.sched_delayed);
+ /* If p is current, we know we can run here, so clear blocked_on */
+ clear_task_blocked_on(p, NULL);
if (!ttwu_state_match(p, state, &success))
goto out;
@@ -4197,8 +4260,15 @@ int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
*/
scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
smp_mb__after_spinlock();
- if (!ttwu_state_match(p, state, &success))
- break;
+ if (!ttwu_state_match(p, state, &success)) {
+ /*
+ * If we're already TASK_RUNNING and PROXY_WAKING,
+ * continue on to the ttwu_runnable() check to force
+ * proxy_needs_return() evaluation.
+ */
+ if (!proxy_task_runnable_but_waking(p))
+ break;
+ }
trace_sched_waking(p);
@@ -4305,6 +4375,16 @@ int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
wake_flags |= WF_MIGRATED;
psi_ttwu_dequeue(p);
set_task_cpu(p, cpu);
+ } else if (cpu != p->wake_cpu) {
+ /*
+ * If we were proxy-migrated to cpu, and then
+ * select_task_rq() picks cpu instead of wake_cpu
+ * to return to, we won't call set_task_cpu(),
+ * leaving a stale wake_cpu pointing to where we
+ * proxy-migrated from. So just fix up wake_cpu
+ * here if it's not correct.
+ */
+ p->wake_cpu = cpu;
}
ttwu_queue(p, cpu, wake_flags);
--
2.52.0.487.g5c8c507ade-goog