Message-ID: <20251119124449.1149616-8-sshegde@linux.ibm.com>
Date: Wed, 19 Nov 2025 18:14:39 +0530
From: Shrikanth Hegde <sshegde@...ux.ibm.com>
To: linux-kernel@...r.kernel.org, linuxppc-dev@...ts.ozlabs.org
Cc: sshegde@...ux.ibm.com, mingo@...hat.com, peterz@...radead.org,
juri.lelli@...hat.com, vincent.guittot@...aro.org, tglx@...utronix.de,
yury.norov@...il.com, maddy@...ux.ibm.com, srikar@...ux.ibm.com,
gregkh@...uxfoundation.org, pbonzini@...hat.com, seanjc@...gle.com,
kprateek.nayak@....com, vschneid@...hat.com, iii@...ux.ibm.com,
huschle@...ux.ibm.com, rostedt@...dmis.org, dietmar.eggemann@....com,
christophe.leroy@...roup.eu
Subject: [PATCH 07/17] sched/fair: Don't consider paravirt CPUs for wakeup and load balance
For the CFS load balancer:
- Mask out paravirt CPUs from the set of CPUs to balance across.
  This restricts or expands the workload as the paravirt mask changes.

At wakeup:
- If prev_cpu is paravirt, see if recent_used_cpu can be chosen
  instead. If not, choose the current CPU.
- On an EAS system, warn if a wakeup happens on a paravirt CPU. At
  this point, no EAS system is expected to have overcommitted CPUs.
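
A simplified sketch of the resulting policy (the exact checks are in
the diff below):

	/* load balance: never spread work onto paravirt CPUs */
	cpumask_andnot(cpus, cpus, cpu_paravirt_mask);

	/* wakeup: move away from a paravirt prev/target */
	if (cpu_paravirt(prev) || cpu_paravirt(target))
		return !cpu_paravirt(p->recent_used_cpu) ?
			p->recent_used_cpu : this_cpu;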
Signed-off-by: Shrikanth Hegde <sshegde@...ux.ibm.com>
---
kernel/sched/fair.c | 38 +++++++++++++++++++++++++++++++++++++-
1 file changed, 37 insertions(+), 1 deletion(-)
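
Note for reviewers: cpu_paravirt() and cpu_paravirt_mask come from
earlier patches in this series. The sketch below is only an
illustration of the assumed interface (the backing variable name and
exact definitions here are assumptions, not the series' code):

	#ifdef CONFIG_PARAVIRT
	/* mask of CPUs currently marked paravirt, maintained elsewhere */
	extern struct cpumask __cpu_paravirt_mask;
	#define cpu_paravirt_mask ((const struct cpumask *)&__cpu_paravirt_mask)

	static inline bool cpu_paravirt(int cpu)
	{
		return cpumask_test_cpu(cpu, cpu_paravirt_mask);
	}
	#else
	static inline bool cpu_paravirt(int cpu) { return false; }
	#endif
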
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 015e00b370c9..760813802cb9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7358,6 +7358,10 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
{
int target = nr_cpumask_bits;
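+	/* prev_cpu is paravirt: prefer the waking CPU */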
+ if (cpu_paravirt(prev_cpu))
+ return this_cpu;
+
if (sched_feat(WA_IDLE))
target = wake_affine_idle(this_cpu, prev_cpu, sync);
@@ -7441,6 +7445,12 @@ static inline int sched_balance_find_dst_cpu(struct sched_domain *sd, struct tas
{
int new_cpu = cpu;
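+	/* prev_cpu is paravirt: account the wakeup and keep the chosen cpu */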
+ if (cpu_paravirt(prev_cpu)) {
+ schedstat_inc(p->stats.nr_wakeups_paravirt);
+ return cpu;
+ }
+
if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr))
return prev_cpu;
@@ -7777,10 +7787,24 @@ static int select_idle_sibling(struct task_struct *p, int this_cpu, int prev, in
unsigned long task_util, util_min, util_max;
int i, recent_used_cpu, prev_aff = -1;
+	/*
+	 * prev and target likely belong to the same LLC; it is better to
+	 * move away from both at wakeup. Return recent_used_cpu if it is
+	 * usable, otherwise fall back to this_cpu.
+	 */
+	if (cpu_paravirt(prev) || cpu_paravirt(target)) {
+		schedstat_inc(p->stats.nr_wakeups_paravirt);
+
+		recent_used_cpu = p->recent_used_cpu;
+		if (cpu_paravirt(recent_used_cpu))
+			return this_cpu;
+
+		return recent_used_cpu;
+	}
+
/*
* On asymmetric system, update task utilization because we will check
* that the task fits with CPU's capacity.
*/
if (sched_asym_cpucap_active()) {
sync_entity_load_avg(&p->se);
task_util = task_util_est(p);
@@ -8539,8 +8563,15 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
if (!is_rd_overutilized(this_rq()->rd)) {
new_cpu = find_energy_efficient_cpu(p, prev_cpu);
- if (new_cpu >= 0)
+
+			/*
+			 * A system supporting the energy model isn't
+			 * expected to have any CPU marked as paravirt.
+			 */
+ if (new_cpu >= 0) {
+ WARN_ON_ONCE(cpu_paravirt(new_cpu));
return new_cpu;
+ }
new_cpu = prev_cpu;
}
@@ -11832,6 +11863,11 @@ static int sched_balance_rq(int this_cpu, struct rq *this_rq,
cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask);
+#ifdef CONFIG_PARAVIRT
+ /* Don't spread load to paravirt CPUs */
+ cpumask_andnot(cpus, cpus, cpu_paravirt_mask);
+#endif
+
schedstat_inc(sd->lb_count[idle]);
redo:
--
2.47.3