Message-ID: <20241106135346.GL24862@noisy.programming.kicks-ass.net>
Date: Wed, 6 Nov 2024 14:53:46 +0100
From: Peter Zijlstra <peterz@...radead.org>
To: Mike Galbraith <efault@....de>
Cc: Phil Auld <pauld@...hat.com>, mingo@...hat.com, juri.lelli@...hat.com,
vincent.guittot@...aro.org, dietmar.eggemann@....com,
rostedt@...dmis.org, bsegall@...gle.com, mgorman@...e.de,
vschneid@...hat.com, linux-kernel@...r.kernel.org,
kprateek.nayak@....com, wuyun.abel@...edance.com,
youssefesmat@...omium.org, tglx@...utronix.de
Subject: Re: [PATCH 17/24] sched/fair: Implement delayed dequeue

On Tue, Nov 05, 2024 at 05:05:12AM +0100, Mike Galbraith wrote:
> After one minute of browsing on a lightly loaded box, trace_printk() said:
>
>   645 - racy peek says there is a room available
>    11 - cool, reserved room is free
>   206 - no vacancy or wakee pinned
> 38807 - SIS accommodates room seeker
>
> The below should improve the odds, but a high return seems unlikely.
>
> ---
> kernel/sched/core.c | 9 ++++++++-
> 1 file changed, 8 insertions(+), 1 deletion(-)
>
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -3790,7 +3790,13 @@ static int ttwu_runnable(struct task_str
>  	rq = __task_rq_lock(p, &rf);
>  	if (task_on_rq_queued(p)) {
>  		update_rq_clock(rq);
> -		if (p->se.sched_delayed)
> +		/*
> +		 * If wakee is mobile and the room it reserved is occupied, let it try to migrate.
> +		 */
> +		if (p->se.sched_delayed && rq->nr_running > 1 && cpumask_weight(p->cpus_ptr) > 1) {
> +			dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK);
> +			goto out_unlock;
> +		} else if (p->se.sched_delayed)
>  			enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
>  		if (!task_on_cpu(rq, p)) {
>  			/*
> @@ -3802,6 +3808,7 @@ static int ttwu_runnable(struct task_str
>  		ttwu_do_wakeup(p);
>  		ret = 1;
>  	}
> +out_unlock:
>  	__task_rq_unlock(rq, &rf);
> 
>  	return ret;
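
For reference, counts like the four above come from ad-hoc trace_printk()
calls at each decision point. A hypothetical sketch of such instrumentation,
layered on the quoted patch (not the actual debug patch -- the exact
predicates behind each count are not visible in the thread, and the 38807
SIS count would be taken around select_idle_sibling() rather than here):

	/* Hypothetical debug instrumentation, on top of the patch above. */
	if (p->se.sched_delayed && rq->nr_running > 1 &&
	    cpumask_weight(p->cpus_ptr) > 1) {
		trace_printk("racy peek says there is a room available\n");
		dequeue_task(rq, p, DEQUEUE_SLEEP | DEQUEUE_DELAYED | DEQUEUE_NOCLOCK);
		goto out_unlock;
	} else if (p->se.sched_delayed) {
		if (rq->nr_running == 1)
			trace_printk("cool, reserved room is free\n");
		else
			trace_printk("no vacancy or wakee pinned\n");
		enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
	}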
So... I was trying to make that prettier and ended up with something
like this:
---
 kernel/sched/core.c  | 46 ++++++++++++++++++++++++++++------------------
 kernel/sched/sched.h |  5 +++++
 2 files changed, 33 insertions(+), 18 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 54d82c21fc8e..b083c6385e88 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3774,28 +3774,38 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
  */
 static int ttwu_runnable(struct task_struct *p, int wake_flags)
 {
-	struct rq_flags rf;
-	struct rq *rq;
-	int ret = 0;
+	CLASS(__task_rq_lock, rq_guard)(p);
+	struct rq *rq = rq_guard.rq;
 
-	rq = __task_rq_lock(p, &rf);
-	if (task_on_rq_queued(p)) {
-		update_rq_clock(rq);
-		if (p->se.sched_delayed)
-			enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
-		if (!task_on_cpu(rq, p)) {
-			/*
-			 * When on_rq && !on_cpu the task is preempted, see if
-			 * it should preempt the task that is current now.
-			 */
-			wakeup_preempt(rq, p, wake_flags);
+	if (!task_on_rq_queued(p))
+		return 0;
+
+	update_rq_clock(rq);
+	if (p->se.sched_delayed) {
+		int queue_flags = ENQUEUE_DELAYED | ENQUEUE_NOCLOCK;
+
+		/*
+		 * Since sched_delayed means we cannot be current anywhere,
+		 * dequeue it here and have it fall through to the
+		 * select_task_rq() case further along the ttwu() path.
+		 */
+		if (rq->nr_running > 1 && p->nr_cpus_allowed > 1) {
+			dequeue_task(rq, p, DEQUEUE_SLEEP | queue_flags);
+			return 0;
 		}
-		ttwu_do_wakeup(p);
-		ret = 1;
+
+		enqueue_task(rq, p, queue_flags);
 	}
-	__task_rq_unlock(rq, &rf);
+	if (!task_on_cpu(rq, p)) {
+		/*
+		 * When on_rq && !on_cpu the task is preempted, see if
+		 * it should preempt the task that is current now.
+		 */
+		wakeup_preempt(rq, p, wake_flags);
+	}
+	ttwu_do_wakeup(p);
 
-	return ret;
+	return 1;
 }
 
 #ifdef CONFIG_SMP
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 21b1780c6695..1714ac38500f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1787,6 +1787,11 @@ task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
 	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
 }
 
+DEFINE_LOCK_GUARD_1(__task_rq_lock, struct task_struct,
+		    _T->rq = __task_rq_lock(_T->lock, &_T->rf),
+		    __task_rq_unlock(_T->rq, &_T->rf),
+		    struct rq *rq; struct rq_flags rf)
+
 DEFINE_LOCK_GUARD_1(task_rq_lock, struct task_struct,
 		    _T->rq = task_rq_lock(_T->lock, &_T->rf),
 		    task_rq_unlock(_T->rq, _T->lock, &_T->rf),
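
For completeness: DEFINE_LOCK_GUARD_1() comes from include/linux/cleanup.h.
Roughly -- a simplified sketch, not the literal macro expansion -- the new
__task_rq_lock guard amounts to:

	/* Simplified sketch of what the cleanup.h machinery generates. */
	typedef struct {
		struct task_struct *lock;	/* the guarded object */
		struct rq *rq;			/* extra member from the macro */
		struct rq_flags rf;		/* extra member from the macro */
	} class___task_rq_lock_t;

	static inline class___task_rq_lock_t
	class___task_rq_lock_constructor(struct task_struct *l)
	{
		class___task_rq_lock_t _t = { .lock = l }, *_T = &_t;

		/* the _lock expression from the macro */
		_T->rq = __task_rq_lock(_T->lock, &_T->rf);
		return _t;
	}

	static inline void
	class___task_rq_lock_destructor(class___task_rq_lock_t *_T)
	{
		if (_T->lock) {
			/* the _unlock expression from the macro */
			__task_rq_unlock(_T->rq, &_T->rf);
		}
	}

CLASS(__task_rq_lock, rq_guard)(p) then declares rq_guard with
__attribute__((cleanup(class___task_rq_lock_destructor))) and initializes
it via the constructor, so every return path in ttwu_runnable() above drops
the rq lock automatically -- which is what lets the early returns replace
the goto out_unlock flow of the quoted patch.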