Message-ID: <a96d3a24e47da7572e7b6d6fa7bb1f6f1be9353e.camel@linux.intel.com>
Date: Tue, 14 Oct 2025 11:05:56 -0700
From: Tim Chen <tim.c.chen@...ux.intel.com>
To: Peter Zijlstra <peterz@...radead.org>
Cc: Ingo Molnar <mingo@...nel.org>, Chen Yu <yu.c.chen@...el.com>,
	Doug Nelson <doug.nelson@...el.com>,
	Mohini Narkhede <mohini.narkhede@...el.com>,
	linux-kernel@...r.kernel.org,
	Vincent Guittot <vincent.guittot@...aro.org>,
	Shrikanth Hegde <sshegde@...ux.ibm.com>,
	K Prateek Nayak <kprateek.nayak@....com>
Subject: Re: [RESEND PATCH] sched/fair: Skip sched_balance_running cmpxchg
when balance is not due
On Tue, 2025-10-14 at 11:24 +0200, Peter Zijlstra wrote:
> On Mon, Oct 13, 2025 at 02:54:19PM -0700, Tim Chen wrote:
>
> > > So I'm not sure I understand the situation, @continue_balancing should
> > > limit this concurrency to however many groups are on this domain -- your
> > > granite thing with SNC on would have something like 6 groups?
> >
> > That's a good point. But I think the contention is worse than
> > 6 CPUs.
> >
> > The hierarchy would be
> >
> > SMT
> > NUMA-level1
> > NUMA-level2
> > NUMA-level3
> > NUMA-level4
>
> Aren't you missing the LLC/NODE domain here? We should have at least one
> !SD_NUMA domain above SMT.
Yeah, I should have said the MC level, which contains the SMT groups:
SMT
MC
NUMA-level1
...
Actual dmesg log:
[ 7.977893] CPU0 attaching sched-domain(s):
[ 7.977897] domain-0: span=0,192 level=SMT
[ 7.977902] groups: 0:{ span=0 cap=972 }, 192:{ span=192 cap=1022 }
[ 7.977907] domain-1: span=0-31,192-223 level=MC
[ 7.977909] groups: 0:{ span=0,192 cap=1994 }, 1:{ span=1,193 cap=2048 }, 2:{ span=2,194 cap=2047 }, 3:{ span=3,195 cap=2047 }, 4:{ span=4,196 cap=2048 }, 5:{ span=5,197 cap=2046 }, 6:{ span=6,198 cap=2047 }, 7:{ span=7,199 cap=2048 }, 8:{ span=8,200 cap=2047 }, 9:{ span=9,201 cap=2046 }, 10:{ span=10,202 cap=2046 }, 11:{ span=11,203 cap=2046 }, 12:{ span=12,204 cap=2044 }, 13:{ span=13,205 cap=2048 }, 14:{ span=14,206 cap=2046 }, 15:{ span=15,207 cap=2043 }, 16:{ span=16,208 cap=2046 }, 17:{ span=17,209 cap=2048 }, 18:{ span=18,210 cap=2046 }, 19:{ span=19,211 cap=2045 }, 20:{ span=20,212 cap=2046 }, 21:{ span=21,213 cap=2044 }, 22:{ span=22,214 cap=2045 }, 23:{ span=23,215 cap=2046 }, 24:{ span=24,216 cap=2045 }, 25:{ span=25,217 cap=2044 }, 26:{ span=26,218 cap=2046 }, 27:{ span=27,219 cap=2045 }, 28:{ span=28,220 cap=2045 }, 29:{ span=29,221 cap=2046 }, 30:{ span=30,222 cap=2045 }, 31:{ span=31,223 cap=2045 }
[ 7.977956] domain-2: span=0-63,192-255 level=NUMA
[ 7.977958] groups: 0:{ span=0-31,192-223 cap=65418 }, 32:{ span=32-63,224-255 cap=65453 }
[ 7.977962] domain-3: span=0-95,192-287 level=NUMA
[ 7.977963] groups: 0:{ span=0-63,192-255 mask=0-31,192-223 cap=130871 }, 64:{ span=32-95,224-287 mask=64-95,256-287 cap=130929 }
[ 7.977968] domain-4: span=0-127,192-319 level=NUMA
[ 7.977970] groups: 0:{ span=0-95,192-287 cap=196314 }, 96:{ span=96-127,288-319 cap=65439 }
[ 7.977974] domain-5: span=0-127,160-319,352-383 level=NUMA
[ 7.977975] groups: 0:{ span=0-127,192-319 mask=0-31,192-223 cap=261753 }, 160:{ span=160-191,352-383 cap=65430 }
[ 7.977980] domain-6: span=0-383 level=NUMA
>
> > There would be multiple CPUs that are first in their SMT group
> > with continue_balancing=1 going up the hierarchy and
> > attempting the cmpxchg at the first NUMA domain level,
> > before calling should_we_balance() and finding that they are
> > not the first in the NUMA domain, setting continue_balancing=0
> > and aborting. Those CPUs are in the same L3.
> > But at the same time, there could be CPUs in other sockets
> > doing cmpxchg on sched_balance_running.
>
> Right, Yu Chen said something like that as well, should_we_balance() is
> too late.
>
> Should we instead move the whole serialize thing inside
> sched_balance_rq() like so:
I think that makes sense. Probably a separate patch.
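If I read it right, a nice side effect is that the cmpxchg then also sits
behind the time_after_eq() check in sched_balance_domains() and behind
should_we_balance(), which is what my patch was after. Roughly (a sketch
of the resulting ordering, not the patch text):

	sched_balance_domains():
		interval = get_sd_balance_interval(sd, busy);
		if (time_after_eq(jiffies, sd->last_balance + interval))
			sched_balance_rq():
				should_we_balance();	/* non-designated CPUs bail out here */
				atomic_cmpxchg_acquire(&sched_balance_running, 0, 1);
							/* only the surviving CPU gets here */

versus today, where every CPU reaching an SD_SERIALIZE domain does the
cmpxchg first, whether or not balance is due.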
Tim
>
> ---
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index bc0b7ce8a65d..e9f719ba17e1 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -11722,6 +11722,22 @@ static void update_lb_imbalance_stat(struct lb_env *env, struct sched_domain *sd
> }
> }
>
> +
> +/*
> + * This flag serializes load-balancing passes over large domains
> + * (above the NODE topology level) - only one load-balancing instance
> + * may run at a time, to reduce overhead on very large systems with
> + * lots of CPUs and large NUMA distances.
> + *
> + * - Note that load-balancing passes triggered while another one
> + * is executing are skipped and not re-tried.
> + *
> + * - Also note that this does not serialize rebalance_domains()
> + * execution, as non-SD_SERIALIZE domains will still be
> + * load-balanced in parallel.
> + */
> +static atomic_t sched_balance_running = ATOMIC_INIT(0);
> +
> /*
> * Check this_cpu to ensure it is balanced within domain. Attempt to move
> * tasks if there is an imbalance.
> @@ -11747,6 +11763,7 @@ static int sched_balance_rq(int this_cpu, struct rq *this_rq,
> .fbq_type = all,
> .tasks = LIST_HEAD_INIT(env.tasks),
> };
> + bool need_unlock = false;
>
> cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask);
>
> @@ -11758,6 +11775,12 @@ static int sched_balance_rq(int this_cpu, struct rq *this_rq,
> goto out_balanced;
> }
>
> + if (idle != CPU_NEWLY_IDLE && (sd->flags & SD_SERIALIZE)) {
> + if (atomic_cmpxchg_acquire(&sched_balance_running, 0, 1))
> + goto out_balanced;
> + need_unlock = true;
> + }
> +
> group = sched_balance_find_src_group(&env);
> if (!group) {
> schedstat_inc(sd->lb_nobusyg[idle]);
> @@ -11998,6 +12021,9 @@ static int sched_balance_rq(int this_cpu, struct rq *this_rq,
> sd->balance_interval < sd->max_interval)
> sd->balance_interval *= 2;
> out:
> + if (need_unlock)
> + atomic_set_release(&sched_balance_running, 0);
> +
> return ld_moved;
> }
>
> @@ -12122,21 +12148,6 @@ static int active_load_balance_cpu_stop(void *data)
> return 0;
> }
>
> -/*
> - * This flag serializes load-balancing passes over large domains
> - * (above the NODE topology level) - only one load-balancing instance
> - * may run at a time, to reduce overhead on very large systems with
> - * lots of CPUs and large NUMA distances.
> - *
> - * - Note that load-balancing passes triggered while another one
> - * is executing are skipped and not re-tried.
> - *
> - * - Also note that this does not serialize rebalance_domains()
> - * execution, as non-SD_SERIALIZE domains will still be
> - * load-balanced in parallel.
> - */
> -static atomic_t sched_balance_running = ATOMIC_INIT(0);
> -
> /*
> * Scale the max sched_balance_rq interval with the number of CPUs in the system.
> * This trades load-balance latency on larger machines for less cross talk.
> @@ -12192,7 +12203,7 @@ static void sched_balance_domains(struct rq *rq, enum cpu_idle_type idle)
> /* Earliest time when we have to do rebalance again */
> unsigned long next_balance = jiffies + 60*HZ;
> int update_next_balance = 0;
> - int need_serialize, need_decay = 0;
> + int need_decay = 0;
> u64 max_cost = 0;
>
> rcu_read_lock();
> @@ -12216,13 +12227,6 @@ static void sched_balance_domains(struct rq *rq, enum cpu_idle_type idle)
> }
>
> interval = get_sd_balance_interval(sd, busy);
> -
> - need_serialize = sd->flags & SD_SERIALIZE;
> - if (need_serialize) {
> - if (atomic_cmpxchg_acquire(&sched_balance_running, 0, 1))
> - goto out;
> - }
> -
> if (time_after_eq(jiffies, sd->last_balance + interval)) {
> if (sched_balance_rq(cpu, rq, sd, idle, &continue_balancing)) {
> /*
> @@ -12236,9 +12240,7 @@ static void sched_balance_domains(struct rq *rq, enum cpu_idle_type idle)
> sd->last_balance = jiffies;
> interval = get_sd_balance_interval(sd, busy);
> }
> - if (need_serialize)
> - atomic_set_release(&sched_balance_running, 0);
> -out:
> +
> if (time_after(next_balance, sd->last_balance + interval)) {
> next_balance = sd->last_balance + interval;
> update_next_balance = 1;
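A side note for anyone following along: the flag is essentially a
try-lock with acquire/release ordering. A minimal userspace model of the
protocol using C11 atomics (my names, not kernel code) looks like:

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_int balance_running;

	static bool balance_trylock(void)
	{
		int expected = 0;

		/*
		 * Acquire pairs with the release store in balance_unlock():
		 * the winner observes everything the previous holder did
		 * before it dropped the flag.
		 */
		return atomic_compare_exchange_strong_explicit(&balance_running,
				&expected, 1,
				memory_order_acquire, memory_order_relaxed);
	}

	static void balance_unlock(void)
	{
		atomic_store_explicit(&balance_running, 0, memory_order_release);
	}

Unlike a spinlock, losers neither spin nor retry -- they just skip this
balancing pass, matching the "skipped and not re-tried" comment in the
patch.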