Message-ID: <8e6f02a0-2bd0-4e75-9055-2cb7c508ce4e@linux.alibaba.com>
Date: Mon, 17 Jun 2024 11:20:55 +0800
From: Tianchen Ding <dtcccc@...ux.alibaba.com>
To: Ankur Arora <ankur.a.arora@...cle.com>
Cc: tglx@...utronix.de, peterz@...radead.org, torvalds@...ux-foundation.org,
paulmck@...nel.org, rostedt@...dmis.org, mark.rutland@....com,
juri.lelli@...hat.com, joel@...lfernandes.org, raghavendra.kt@....com,
sshegde@...ux.ibm.com, boris.ostrovsky@...cle.com, konrad.wilk@...cle.com,
Ingo Molnar <mingo@...hat.com>, Vincent Guittot
<vincent.guittot@...aro.org>, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2 29/35] sched: handle preempt=voluntary under
PREEMPT_AUTO
On 2024/5/28 08:35, Ankur Arora wrote:
> The default preemption policy for voluntary preemption under
> PREEMPT_AUTO is to schedule eagerly for tasks of higher scheduling
> class, and lazily for well-behaved, non-idle tasks.
>
> This is the same policy as preempt=none, with an eager handling of
> higher priority scheduling classes.
>
> Comparing a cyclictest workload with a background kernel load of
> 'stress-ng --mmap' shows that both the average and the maximum
> latencies improve:
>
> # stress-ng --mmap 0 &
> # cyclictest --mlockall --smp --priority=80 --interval=200 --distance=0 -q -D 300
>
>                                       Min ( %stdev )      Act ( %stdev )         Avg ( %stdev )       Max ( %stdev )
>
> PREEMPT_AUTO, preempt=voluntary      1.73 ( +- 25.43% )   62.16 ( +- 303.39% )  14.92 ( +- 17.96% )  2778.22 ( +- 15.04% )
> PREEMPT_DYNAMIC, preempt=voluntary   1.83 ( +- 20.76% )  253.45 ( +- 233.21% )  18.70 ( +- 15.88% )  2992.45 ( +- 15.95% )
>
> The table above shows the aggregated latencies across all CPUs.
>
> Cc: Ingo Molnar <mingo@...hat.com>
> Cc: Peter Zijlstra <peterz@...radead.org>
> Cc: Juri Lelli <juri.lelli@...hat.com>
> Cc: Vincent Guittot <vincent.guittot@...aro.org>
> Originally-by: Thomas Gleixner <tglx@...utronix.de>
> Link: https://lore.kernel.org/lkml/87jzshhexi.ffs@tglx/
> Signed-off-by: Ankur Arora <ankur.a.arora@...cle.com>
> ---
>  kernel/sched/core.c  | 12 ++++++++----
>  kernel/sched/sched.h |  6 ++++++
>  2 files changed, 14 insertions(+), 4 deletions(-)
>
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index c25cccc09b65..2bc3ae21a9d0 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -1052,6 +1052,9 @@ static resched_t resched_opt_translate(struct task_struct *curr,
>  	if (preempt_model_preemptible())
>  		return RESCHED_NOW;
>  
> +	if (preempt_model_voluntary() && opt == RESCHED_PRIORITY)
> +		return RESCHED_NOW;
> +
>  	if (is_idle_task(curr))
>  		return RESCHED_NOW;
>  
> @@ -2289,7 +2292,7 @@ void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
>  	if (p->sched_class == rq->curr->sched_class)
>  		rq->curr->sched_class->wakeup_preempt(rq, p, flags);
>  	else if (sched_class_above(p->sched_class, rq->curr->sched_class))
> -		resched_curr(rq);
> +		resched_curr_priority(rq);
>  
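(The sched.h side isn't quoted above; going by the diffstat,
resched_curr_priority() is presumably just a thin wrapper along the
lines of the sketch below, with __resched_curr() and enum resched_opt
coming from the earlier patches in this series:)

static inline void resched_curr_priority(struct rq *rq)
{
	__resched_curr(rq, RESCHED_PRIORITY);
}
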
Besides the higher-scheduling-class condition, could we also do
resched_curr_priority() within the same class? For example, in the fair
class we could use it when a SCHED_NORMAL task preempts a SCHED_IDLE
one. Maybe something like:
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 41b58387023d..eedb70234bdd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8352,6 +8352,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
 	struct sched_entity *se = &curr->se, *pse = &p->se;
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	int cse_is_idle, pse_is_idle;
+	enum resched_opt opt = RESCHED_PRIORITY;
 
 	if (unlikely(se == pse))
 		return;
@@ -8385,7 +8386,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
 	/* Idle tasks are by definition preempted by non-idle tasks. */
 	if (unlikely(task_has_idle_policy(curr)) &&
 	    likely(!task_has_idle_policy(p)))
-		goto preempt;
+		goto preempt; /* RESCHED_PRIORITY */
 
 	/*
 	 * Batch and idle tasks do not preempt non-idle tasks (their preemption
@@ -8405,7 +8406,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
 	 * in the inverse case).
 	 */
 	if (cse_is_idle && !pse_is_idle)
-		goto preempt;
+		goto preempt; /* RESCHED_PRIORITY */
 	if (cse_is_idle != pse_is_idle)
 		return;
 
@@ -8415,13 +8416,15 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
 	/*
 	 * XXX pick_eevdf(cfs_rq) != se ?
 	 */
-	if (pick_eevdf(cfs_rq) == pse)
+	if (pick_eevdf(cfs_rq) == pse) {
+		opt = RESCHED_DEFAULT;
 		goto preempt;
+	}
 
 	return;
 
 preempt:
-	resched_curr(rq);
+	__resched_curr(rq, opt);
 }
 
 #ifdef CONFIG_SMP
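
If I read the series right, the effect under preempt=voluntary would
then be as below (an illustration of the intended control flow only;
the RESCHED_LAZY / TIF_NEED_RESCHED_LAZY names are assumed from the
cover letter, not from the hunks quoted here):

/*
 * SCHED_NORMAL task wakes while a SCHED_IDLE task is running:
 *
 *   check_preempt_wakeup_fair()
 *     -> __resched_curr(rq, RESCHED_PRIORITY)
 *        -> resched_opt_translate() returns RESCHED_NOW
 *           (sets TIF_NEED_RESCHED: eager preemption)
 *
 * Ordinary pick_eevdf() preemption between two non-idle tasks:
 *
 *   check_preempt_wakeup_fair()
 *     -> __resched_curr(rq, RESCHED_DEFAULT)
 *        -> resched_opt_translate() returns RESCHED_LAZY
 *           (sets TIF_NEED_RESCHED_LAZY: deferred to tick or
 *            exit-to-user)
 */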