[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <1ce821178bf178ce841ea94bb8139fd9a197b86b.1750268218.git.tim.c.chen@linux.intel.com>
Date: Wed, 18 Jun 2025 11:28:04 -0700
From: Tim Chen <tim.c.chen@...ux.intel.com>
To: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
K Prateek Nayak <kprateek.nayak@....com>,
"Gautham R . Shenoy" <gautham.shenoy@....com>
Cc: Tim Chen <tim.c.chen@...ux.intel.com>,
Juri Lelli <juri.lelli@...hat.com>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>,
Mel Gorman <mgorman@...e.de>,
Valentin Schneider <vschneid@...hat.com>,
Tim Chen <tim.c.chen@...el.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Libo Chen <libo.chen@...cle.com>,
Abel Wu <wuyun.abel@...edance.com>,
Madadi Vineeth Reddy <vineethr@...ux.ibm.com>,
Hillf Danton <hdanton@...a.com>,
Len Brown <len.brown@...el.com>,
linux-kernel@...r.kernel.org,
Chen Yu <yu.c.chen@...el.com>
Subject: [RFC patch v3 16/20] sched: Consider LLC locality for active balance
If the busiest run queue has only one task, active balancing is
enlisted to actually move that task. However, before moving the
task, we should consider whether we would be moving it away from
its preferred LLC.
Don't move the single running task in a run queue to another LLC
if doing so would pull it away from its preferred LLC, or if the
move would cause too much imbalance between the LLCs.
Co-developed-by: Chen Yu <yu.c.chen@...el.com>
Signed-off-by: Chen Yu <yu.c.chen@...el.com>
Signed-off-by: Tim Chen <tim.c.chen@...ux.intel.com>
---
kernel/sched/fair.c | 51 ++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 48 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 42222364ad9c..3a8f6fc52055 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -12294,10 +12294,43 @@ imbalanced_active_balance(struct lb_env *env)
return 0;
}
+#ifdef CONFIG_SCHED_CACHE
+static inline bool
+break_llc_locality(struct lb_env *env)
+{
+ if (!sched_feat(SCHED_CACHE))
+ return 0;
+
+ if (cpus_share_cache(env->src_cpu, env->dst_cpu))
+ return 0;
+ /*
+ * All tasks in this runqueue prefer the current LLC.
+ * Keep them here unless the LLC is heavily loaded, and
+ * never pull away the sole running task.
+ */
+ if (env->src_rq->nr_pref_llc_running == env->src_rq->cfs.h_nr_runnable &&
+ (env->src_rq->nr_running <= 1 ||
+ _get_migrate_hint(env->src_cpu, env->dst_cpu,
+ 0, false) == mig_forbid))
+ return 1;
+
+ return 0;
+}
+#else
+static inline bool
+break_llc_locality(struct lb_env *env)
+{
+ return 0;
+}
+#endif
+
static int need_active_balance(struct lb_env *env)
{
struct sched_domain *sd = env->sd;
+ if (break_llc_locality(env))
+ return 0;
+
if (asym_active_balance(env))
return 1;
@@ -12317,7 +12350,8 @@ static int need_active_balance(struct lb_env *env)
return 1;
}
- if (env->migration_type == migrate_misfit)
+ if (env->migration_type == migrate_misfit ||
+ env->migration_type == migrate_llc_task)
return 1;
return 0;
@@ -12762,9 +12796,20 @@ static int active_load_balance_cpu_stop(void *data)
goto out_unlock;
/* Is there any task to move? */
- if (busiest_rq->nr_running <= 1)
- goto out_unlock;
+ if (busiest_rq->nr_running <= 1) {
+#ifdef CONFIG_SCHED_CACHE
+ int llc = llc_idx(target_cpu);
+ if (!sched_feat(SCHED_CACHE))
+ goto out_unlock;
+
+ if (llc < 0)
+ goto out_unlock;
+ /* don't migrate if task does not prefer target */
+ if (busiest_rq->nr_pref_llc[llc] < 1)
+#endif
+ goto out_unlock;
+ }
/*
* This condition is "impossible", if it occurs
* we need to fix it. Originally reported by
--
2.32.0
Powered by blists - more mailing lists