Message-ID: <20250818174753.GA846072@noisy.programming.kicks-ass.net>
Date: Mon, 18 Aug 2025 19:47:53 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: liuwenfang <liuwenfang@...or.com>
Cc: 'Tejun Heo' <tj@...nel.org>, 'David Vernet' <void@...ifault.com>,
'Andrea Righi' <arighi@...dia.com>,
'Changwoo Min' <changwoo@...lia.com>,
'Ingo Molnar' <mingo@...hat.com>,
'Juri Lelli' <juri.lelli@...hat.com>,
'Vincent Guittot' <vincent.guittot@...aro.org>,
'Dietmar Eggemann' <dietmar.eggemann@....com>,
'Steven Rostedt' <rostedt@...dmis.org>,
'Ben Segall' <bsegall@...gle.com>, 'Mel Gorman' <mgorman@...e.de>,
'Valentin Schneider' <vschneid@...hat.com>,
"'linux-kernel@...r.kernel.org'" <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v3 1/3] sched_ext: Fix pnt_seq calculation

On Sun, Jul 20, 2025 at 09:36:22AM +0000, liuwenfang wrote:
> Fix pnt_seq calculation for all transitions.

This doesn't even begin to be an adequate changelog.

And please, don't put an out-of-line function call in
put_prev_set_next_task().
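
For illustration, one way to avoid that is a header-only helper along
these lines (sketch only, untested; it reuses the patch's own name and
CONFIG_SMP guard, and assumes it sits next to the other scx helpers in
kernel/sched/sched.h):

#ifdef CONFIG_SCHED_CLASS_EXT
/* Sketch: bump rq->scx.pnt_seq without leaving the header. */
static inline void scx_put_prev_set_next(struct rq *rq,
					 struct task_struct *prev,
					 struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * Pairs with the smp_load_acquire() in kick_cpus_irq_workfn()
	 * waiting for this CPU to go through a resched.
	 */
	smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
#endif
}
#else
static inline void scx_put_prev_set_next(struct rq *rq,
					 struct task_struct *prev,
					 struct task_struct *next) {}
#endif
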
> Signed-off-by: Wenfang Liu liuwenfang@...or.com
> ---
>  kernel/sched/ext.c   | 23 ++++++++++++++---------
>  kernel/sched/fair.c  |  3 +++
>  kernel/sched/sched.h |  8 ++++++++
>  3 files changed, 25 insertions(+), 9 deletions(-)
>
> diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
> index f5133249f..93e03b7d0 100644
> --- a/kernel/sched/ext.c
> +++ b/kernel/sched/ext.c
> @@ -3191,14 +3191,6 @@ static void switch_class(struct rq *rq, struct task_struct *next)
>  {
>  	const struct sched_class *next_class = next->sched_class;
>
> -#ifdef CONFIG_SMP
> -	/*
> -	 * Pairs with the smp_load_acquire() issued by a CPU in
> -	 * kick_cpus_irq_workfn() who is waiting for this CPU to perform a
> -	 * resched.
> -	 */
> -	smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
> -#endif
>  	if (!static_branch_unlikely(&scx_ops_cpu_preempt))
>  		return;
>
> @@ -3233,6 +3225,19 @@ static void switch_class(struct rq *rq, struct task_struct *next)
>  	}
>  }
>
> +void scx_put_prev_set_next(struct rq *rq, struct task_struct *prev,
> +			   struct task_struct *next)
> +{
> +#ifdef CONFIG_SMP
> +	/*
> +	 * Pairs with the smp_load_acquire() issued by a CPU in
> +	 * kick_cpus_irq_workfn() who is waiting for this CPU to perform a
> +	 * resched.
> +	 */
> +	smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
> +#endif
> +}
> +
>  static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
>  			      struct task_struct *next)
>  {
> @@ -5966,7 +5971,7 @@ static void kick_cpus_irq_workfn(struct irq_work *irq_work)
>  		if (cpu != cpu_of(this_rq)) {
>  			/*
>  			 * Pairs with smp_store_release() issued by this CPU in
> -			 * switch_class() on the resched path.
> +			 * scx_put_prev_set_next() on the resched path.
>  			 *
>  			 * We busy-wait here to guarantee that no other task can
>  			 * be scheduled on our core before the target CPU has
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 0fb9bf995..50d757e92 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -8887,6 +8887,9 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
>
>  	__put_prev_set_next_dl_server(rq, prev, p);
>
> +	if (scx_enabled())
> +		scx_put_prev_set_next(rq, prev, p);
> +
>  	/*
>  	 * Because of the set_next_buddy() in dequeue_task_fair() it is rather
>  	 * likely that a next task is from the same cgroup as the current.
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index 47972f34e..bcb7f175c 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -1738,12 +1738,17 @@ static inline void scx_rq_clock_invalidate(struct rq *rq)
>  	WRITE_ONCE(rq->scx.flags, rq->scx.flags & ~SCX_RQ_CLK_VALID);
>  }
>
> +void scx_put_prev_set_next(struct rq *rq, struct task_struct *prev,
> +			   struct task_struct *next);
>  #else /* !CONFIG_SCHED_CLASS_EXT */
>  #define scx_enabled() false
>  #define scx_switched_all() false
>
>  static inline void scx_rq_clock_update(struct rq *rq, u64 clock) {}
>  static inline void scx_rq_clock_invalidate(struct rq *rq) {}
> +static inline void scx_put_prev_set_next(struct rq *rq,
> +					 struct task_struct *prev,
> +					 struct task_struct *next) {}
>  #endif /* !CONFIG_SCHED_CLASS_EXT */
>
>  /*
> @@ -2465,6 +2470,9 @@ static inline void put_prev_set_next_task(struct rq *rq,
>
>  	__put_prev_set_next_dl_server(rq, prev, next);
>
> +	if (scx_enabled())
> +		scx_put_prev_set_next(rq, prev, next);
> +
>  	if (next == prev)
>  		return;
>
> --
> 2.17.1