Message-ID: <b2ac87cd-bd5e-439b-b2e3-afb4215c4edc@linux.ibm.com>
Date: Wed, 12 Nov 2025 16:23:32 +0530
From: Shrikanth Hegde <sshegde@...ux.ibm.com>
To: Peter Zijlstra <peterz@...radead.org>
Cc: Tim Chen <tim.c.chen@...ux.intel.com>, Ingo Molnar <mingo@...nel.org>,
Chen Yu <yu.c.chen@...el.com>, Doug Nelson <doug.nelson@...el.com>,
Mohini Narkhede <mohini.narkhede@...el.com>,
linux-kernel@...r.kernel.org,
Vincent Guittot <vincent.guittot@...aro.org>,
K Prateek Nayak <kprateek.nayak@....com>,
Srikar Dronamraju <srikar@...ux.ibm.com>,
Linus Torvalds <torvalds@...ux-foundation.org>
Subject: Re: [PATCH v4] sched/fair: Skip sched_balance_running cmpxchg when
balance is not due

On 11/12/25 4:07 PM, Peter Zijlstra wrote:
> On Wed, Nov 12, 2025 at 01:32:23PM +0530, Srikar Dronamraju wrote:
>>>  	group = sched_balance_find_src_group(&env);
>>>  	if (!group) {
>>>  		schedstat_inc(sd->lb_nobusyg[idle]);
>>> @@ -11892,6 +11916,9 @@ static int sched_balance_rq(int this_cpu, struct rq *this_rq,
>>>  			if (!cpumask_subset(cpus, env.dst_grpmask)) {
>>>  				env.loop = 0;
>>>  				env.loop_break = SCHED_NR_MIGRATE_BREAK;
>>> +				if (need_unlock)
>>> +					atomic_set_release(&sched_balance_running, 0);
>>> +
>>
>> One nit:
>> While the current code is good, would conditionally resetting
>> need_unlock just after resetting the atomic variable be better than
>> the unconditional reset that we do now?
>
> Right, I had the same thought when I grabbed the patch yesterday, but
> ignored it.
>
> But perhaps something like so?
>
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -11717,14 +11717,20 @@ static int sched_balance_rq(int this_cpu
>  		.fbq_type	= all,
>  		.tasks		= LIST_HEAD_INIT(env.tasks),
>  	};
> -	bool need_unlock;
> +	bool need_unlock = false;
>
>  	cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask);
>
>  	schedstat_inc(sd->lb_count[idle]);
>
> +	if (0) {
>  redo:
> -	need_unlock = false;
> +		if (need_unlock) {
> +			atomic_set_release(&sched_balance_running, 0);
> +			need_unlock = false;
> +		}
> +	}
> +
>  	if (!should_we_balance(&env)) {
>  		*continue_balancing = 0;
>  		goto out_balanced;
> @@ -11861,9 +11867,6 @@ static int sched_balance_rq(int this_cpu
>  			if (!cpumask_subset(cpus, env.dst_grpmask)) {
>  				env.loop = 0;
>  				env.loop_break = SCHED_NR_MIGRATE_BREAK;
> -				if (need_unlock)
> -					atomic_set_release(&sched_balance_running, 0);
> -
>  				goto redo;
>  			}
>  			goto out_all_pinned;
>
>
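(Aside, to make sure I read the if (0) trick right: the block is dead code
on the first pass and is only ever entered through the goto, so the unlock
runs exactly once per retry and never on entry. A tiny standalone sketch of
that control flow -- plain userspace C, all names made up:

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		bool need_unlock = false;
		int attempts = 0;

		if (0) {
	retry:
			/* Reached only via the goto below, never on the first pass. */
			if (need_unlock) {
				printf("unlock before retry %d\n", attempts);
				need_unlock = false;
			}
		}

		/* Pretend we took the serialization lock and must redo. */
		need_unlock = true;
		if (++attempts < 3)
			goto retry;

		return 0;
	}

Jumping into the block like this is legal C as long as it declares no VLAs;
the if (0) only hides it from the straight-line path.)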
> ---
>
> The other option is something like this, but Linus hated on this pattern
> when we started with things.
>
> ---
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -11692,6 +11692,8 @@ static void update_lb_imbalance_stat(str
>   */
>  static atomic_t sched_balance_running = ATOMIC_INIT(0);
>
> +DEFINE_FREE(balance_unlock, atomic_t *, if (_T) atomic_set_release(_T, 0));
> +
>  /*
>   * Check this_cpu to ensure it is balanced within domain. Attempt to move
>   * tasks if there is an imbalance.
> @@ -11717,24 +11719,23 @@ static int sched_balance_rq(int this_cpu
>  		.fbq_type	= all,
>  		.tasks		= LIST_HEAD_INIT(env.tasks),
>  	};
> -	bool need_unlock;
>
>  	cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask);
>
>  	schedstat_inc(sd->lb_count[idle]);
>
>  redo:
> -	need_unlock = false;
>  	if (!should_we_balance(&env)) {
>  		*continue_balancing = 0;
> -		goto out_balanced;
> +		return 0;
>  	}
>
> +	atomic_t *lock __free(balance_unlock) = NULL;
>  	if (sd->flags & SD_SERIALIZE) {
> -		if (atomic_cmpxchg_acquire(&sched_balance_running, 0, 1)) {
> -			goto out_balanced;
> -		}
> -		need_unlock = true;
> +		if (!atomic_try_cmpxchg_acquire(&sched_balance_running, 0, 1))
> +			return 0;
> +
> +		lock = &sched_balance_running;
>  	}
>
>  	group = sched_balance_find_src_group(&env);
> @@ -11861,9 +11862,6 @@ static int sched_balance_rq(int this_cpu
>  			if (!cpumask_subset(cpus, env.dst_grpmask)) {
>  				env.loop = 0;
>  				env.loop_break = SCHED_NR_MIGRATE_BREAK;
> -				if (need_unlock)
> -					atomic_set_release(&sched_balance_running, 0);
> -
>  				goto redo;
>  			}
>  			goto out_all_pinned;
> @@ -11980,9 +11978,6 @@ static int sched_balance_rq(int this_cpu
>  	    sd->balance_interval < sd->max_interval)
>  		sd->balance_interval *= 2;
>  out:
> -	if (need_unlock)
> -		atomic_set_release(&sched_balance_running, 0);
> -
>  	return ld_moved;
>  }
>
The second one is difficult to understand.
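If I am reading include/linux/cleanup.h right, the __free() annotation is a
scoped destructor: DEFINE_FREE() generates a cleanup function and __free()
attaches it to the variable via the compiler's cleanup attribute, so the
release runs on every path out of the scope once lock is set. Roughly this
(paraphrased, not the exact kernel macros):

	/* What DEFINE_FREE(balance_unlock, atomic_t *, ...) boils down to: */
	static inline void __free_balance_unlock(void *p)
	{
		atomic_t *_T = *(atomic_t **)p;

		if (_T)
			atomic_set_release(_T, 0);
	}

	/*
	 * __free(balance_unlock) then marks the variable so that the
	 * function above is called whenever it goes out of scope:
	 */
	#define __free(_name)	__attribute__((__cleanup__(__free_##_name)))

So the unlock happens implicitly at each return, which is exactly what makes
the flow hard to follow when reading the function top to bottom.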
Is this an option too? It keeps the explicit unlock and simply clears
need_unlock together with the atomic on the redo path, which is what
Srikar's nit was asking for.
---
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f8e1df9c5199..64e2aeacd65e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -11720,14 +11720,13 @@ static int sched_balance_rq(int this_cpu, struct rq *this_rq,
 		.fbq_type	= all,
 		.tasks		= LIST_HEAD_INIT(env.tasks),
 	};
-	bool need_unlock;
+	bool need_unlock = false;

 	cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask);

 	schedstat_inc(sd->lb_count[idle]);

 redo:
-	need_unlock = false;
 	if (!should_we_balance(&env)) {
 		*continue_balancing = 0;
 		goto out_balanced;
@@ -11864,8 +11863,10 @@ static int sched_balance_rq(int this_cpu, struct rq *this_rq,
 			if (!cpumask_subset(cpus, env.dst_grpmask)) {
 				env.loop = 0;
 				env.loop_break = SCHED_NR_MIGRATE_BREAK;
-				if (need_unlock)
+				if (need_unlock) {
 					atomic_set_release(&sched_balance_running, 0);
+					need_unlock = false;
+				}

 				goto redo;
 			}