Message-ID: <877cajdu8x.fsf@oracle.com>
Date: Mon, 07 Oct 2024 22:43:58 -0700
From: Ankur Arora <ankur.a.arora@...cle.com>
To: Peter Zijlstra <peterz@...radead.org>
Cc: bigeasy@...utronix.de, tglx@...utronix.de, mingo@...nel.org,
linux-kernel@...r.kernel.org, juri.lelli@...hat.com,
vincent.guittot@...aro.org, dietmar.eggemann@....com,
rostedt@...dmis.org, bsegall@...gle.com, mgorman@...e.de,
vschneid@...hat.com, ankur.a.arora@...cle.com, efault@....de
Subject: Re: [PATCH 2/5] sched: Add Lazy preemption model
Peter Zijlstra <peterz@...radead.org> writes:
> Change fair to use resched_curr_lazy(), which, when the lazy
> preemption model is selected, will set TIF_NEED_RESCHED_LAZY.
>
> This LAZY bit will be promoted to the full NEED_RESCHED bit on tick.
> As such, the average delay between setting LAZY and actually
> rescheduling will be TICK_NSEC/2.
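(To put a number on that: assuming the LAZY bit gets set at a random
point within the tick period, that is ~0.5 ms of average added delay
for fair tasks at HZ=1000, ~2 ms at HZ=250.)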
>
> In short, Lazy preemption will delay preemption for fair class but
> will function as Full preemption for all the other classes, most
> notably the realtime (RR/FIFO/DEADLINE) classes.
>
> The goal is to bridge the performance gap with Voluntary, such that we
> might eventually remove that option entirely.
>
> Suggested-by: Thomas Gleixner <tglx@...utronix.de>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
> ---
>  include/linux/preempt.h |  8 ++++-
>  kernel/Kconfig.preempt  | 15 +++++++++
>  kernel/sched/core.c     | 76 ++++++++++++++++++++++++++++++++++++++++++++++--
>  kernel/sched/debug.c    |  5 +--
>  kernel/sched/fair.c     |  6 +--
>  kernel/sched/sched.h    |  1
>  6 files changed, 103 insertions(+), 8 deletions(-)
>
> --- a/include/linux/preempt.h
> +++ b/include/linux/preempt.h
> @@ -486,6 +486,7 @@ DEFINE_LOCK_GUARD_0(migrate, migrate_dis
> extern bool preempt_model_none(void);
> extern bool preempt_model_voluntary(void);
> extern bool preempt_model_full(void);
> +extern bool preempt_model_lazy(void);
>
> #else
>
> @@ -502,6 +503,11 @@ static inline bool preempt_model_full(vo
> return IS_ENABLED(CONFIG_PREEMPT);
> }
>
> +static inline bool preempt_model_lazy(void)
> +{
> + return IS_ENABLED(CONFIG_PREEMPT_LAZY);
> +}
> +
> #endif
>
> static inline bool preempt_model_rt(void)
> @@ -519,7 +525,7 @@ static inline bool preempt_model_rt(void
> */
> static inline bool preempt_model_preemptible(void)
> {
> - return preempt_model_full() || preempt_model_rt();
> + return preempt_model_full() || preempt_model_lazy() || preempt_model_rt();
> }
In addition to preempt_model_preemptible() we probably also need
something like:

static inline bool preempt_model_minimize_latency(void)
{
	return preempt_model_full() || preempt_model_rt();
}

for spin_needbreak()/rwlock_needbreak(). That would make the behaviour
of spin_needbreak() under the lazy model similar to none/voluntary.
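A sketch of the intended use, assuming spin_needbreak() keeps its
current shape (preempt_model_minimize_latency() being the helper
proposed above):

static inline int spin_needbreak(spinlock_t *lock)
{
	/*
	 * Only break out of contended critical sections early when the
	 * model asks for minimal latency (full/rt). Under lazy this
	 * returns 0, same as none/voluntary.
	 */
	if (!preempt_model_minimize_latency())
		return 0;

	return spin_is_contended(lock);
}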
> #endif /* __LINUX_PREEMPT_H */
> --- a/kernel/Kconfig.preempt
> +++ b/kernel/Kconfig.preempt
> @@ -11,6 +11,9 @@ config PREEMPT_BUILD
> select PREEMPTION
> select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
>
> +config ARCH_HAS_PREEMPT_LAZY
> + bool
> +
> choice
> prompt "Preemption Model"
> default PREEMPT_NONE
> @@ -67,6 +70,18 @@ config PREEMPT
> embedded system with latency requirements in the milliseconds
> range.
>
> +config PREEMPT_LAZY
> + bool "Scheduler controlled preemption model"
> + depends on !ARCH_NO_PREEMPT
> + depends on ARCH_HAS_PREEMPT_LAZY
> + select PREEMPT_BUILD
> + help
> + This option provides a scheduler driven preemption model that
> + is fundamentally similar to full preemption, but is less
> + eager to preempt SCHED_NORMAL tasks in an attempt to
> + reduce lock holder preemption and recover some of the performance
> + gains seen from using Voluntary preemption.
> +
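(So on an arch that sets ARCH_HAS_PREEMPT_LAZY, choosing this model
ends up as roughly the following .config fragment, via the
PREEMPT_BUILD -> PREEMPTION selects above:

	CONFIG_ARCH_HAS_PREEMPT_LAZY=y
	CONFIG_PREEMPT_LAZY=y
	CONFIG_PREEMPT_BUILD=y
	CONFIG_PREEMPTION=y
)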
> config PREEMPT_RT
> bool "Fully Preemptible Kernel (Real-Time)"
> depends on EXPERT && ARCH_SUPPORTS_RT
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -1078,6 +1078,9 @@ static void __resched_curr(struct rq *rq
>
> lockdep_assert_rq_held(rq);
>
> + if (is_idle_task(curr) && tif == TIF_NEED_RESCHED_LAZY)
> + tif = TIF_NEED_RESCHED;
> +
Tasks with the idle policy get handled at the usual user-space
boundary; it's only the idle task itself that needs this promotion.
Maybe add a comment reflecting that?
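Something like this, perhaps (only a sketch of the suggested comment,
the code itself unchanged):

	/*
	 * The idle task never returns to user space and the idle loop
	 * only checks TIF_NEED_RESCHED, so a lazy bit set on it could
	 * go unnoticed until the next tick: promote it to an immediate
	 * reschedule. Tasks with the idle *policy* are unaffected and
	 * keep the lazy behaviour.
	 */
	if (is_idle_task(curr) && tif == TIF_NEED_RESCHED_LAZY)
		tif = TIF_NEED_RESCHED;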
> if (cti->flags & ((1 << tif) | _TIF_NEED_RESCHED))
> return;
>
> @@ -1103,6 +1106,32 @@ void resched_curr(struct rq *rq)
> __resched_curr(rq, TIF_NEED_RESCHED);
> }
>
> +#ifdef CONFIG_PREEMPT_DYNAMIC
> +static DEFINE_STATIC_KEY_FALSE(sk_dynamic_preempt_lazy);
> +static __always_inline bool dynamic_preempt_lazy(void)
> +{
> + return static_branch_unlikely(&sk_dynamic_preempt_lazy);
> +}
> +#else
> +static __always_inline bool dynamic_preempt_lazy(void)
> +{
> + return IS_ENABLED(CONFIG_PREEMPT_LAZY);
> +}
> +#endif
> +
> +static __always_inline int tif_need_resched_lazy(void)
> +{
> + if (dynamic_preempt_lazy())
> + return TIF_NEED_RESCHED_LAZY;
> +
> + return TIF_NEED_RESCHED;
> +}
Nice. This simplifies things: with lazy disabled, tif_need_resched_lazy()
falls back to TIF_NEED_RESCHED, so resched_curr_lazy() degenerates to a
plain resched_curr() and callers don't need to care.
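For the dynamic case I assume a later patch wires the key into
sched_dynamic_update(); roughly (a sketch, the preempt_dynamic_lazy
mode name is my guess):

	case preempt_dynamic_lazy:
		static_branch_enable(&sk_dynamic_preempt_lazy);
		break;

with the other modes doing a static_branch_disable() on it.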
> +void resched_curr_lazy(struct rq *rq)
> +{
> + __resched_curr(rq, tif_need_resched_lazy());
> +}
> +
> void resched_cpu(int cpu)
> {
> struct rq *rq = cpu_rq(cpu);
> @@ -5598,6 +5627,10 @@ void sched_tick(void)
> update_rq_clock(rq);
> hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
> update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure);
> +
> + if (dynamic_preempt_lazy() && tif_test_bit(TIF_NEED_RESCHED_LAZY))
> + resched_curr(rq);
> +
So this works for SCHED_NORMAL. But does this do the right thing for
SCHED_DEADLINE and the other scheduling classes?
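My tentative reading of the call sites (a sketch; the fair.c hunk in
this patch is the only one switching over):

	/* fair.c: lazy request, promoted no later than the next tick */
	resched_curr_lazy(rq);

	/* rt.c, deadline.c, core.c: unchanged, immediate reschedule */
	resched_curr(rq);

so the LAZY bit should only ever be set while a fair task is current,
but it would be good to confirm that holds for cross-class wakeups.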
--
ankur