Message-Id: <231864b303906a60491bbb9eb7b2e3f083bff248.1760206683.git.tim.c.chen@linux.intel.com>
Date: Sat, 11 Oct 2025 11:24:50 -0700
From: Tim Chen <tim.c.chen@...ux.intel.com>
To: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
K Prateek Nayak <kprateek.nayak@....com>,
"Gautham R . Shenoy" <gautham.shenoy@....com>
Cc: Tim Chen <tim.c.chen@...ux.intel.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Juri Lelli <juri.lelli@...hat.com>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>,
Mel Gorman <mgorman@...e.de>,
Valentin Schneider <vschneid@...hat.com>,
Madadi Vineeth Reddy <vineethr@...ux.ibm.com>,
Hillf Danton <hdanton@...a.com>,
Shrikanth Hegde <sshegde@...ux.ibm.com>,
Jianyong Wu <jianyong.wu@...look.com>,
Yangyu Chen <cyy@...self.name>,
Tingyin Duan <tingyin.duan@...il.com>,
Vern Hao <vernhao@...cent.com>,
Len Brown <len.brown@...el.com>,
Aubrey Li <aubrey.li@...el.com>,
Zhao Liu <zhao1.liu@...el.com>,
Chen Yu <yu.chen.surf@...il.com>,
Chen Yu <yu.c.chen@...el.com>,
Libo Chen <libo.chen@...cle.com>,
Adam Li <adamli@...amperecomputing.com>,
Tim Chen <tim.c.chen@...el.com>,
linux-kernel@...r.kernel.org
Subject: [PATCH 13/19] sched/fair: Handle moving single tasks to/from their preferred LLC
If the busiest runqueue has only one task, active balancing may be
invoked to move it. However, before migration, check whether the task
is running on its preferred LLC.
Do not move a lone task to another LLC if it would move the task
away from its preferred LLC or cause excessive imbalance between LLCs.
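As a quick illustration of the pull-side rule above, here is a minimal
userspace sketch (struct rq_sketch, would_break_llc_locality() and its
boolean parameters are illustrative stand-ins for the scheduler state,
not the kernel definitions used in the diff below; the
sched_cache_enabled() gate is omitted for brevity):

    #include <stdbool.h>

    /* Illustrative stand-ins for the runqueue fields consulted by the patch. */
    struct rq_sketch {
            unsigned int nr_running;          /* all runnable tasks on the rq   */
            unsigned int nr_pref_llc_running; /* tasks preferring this rq's LLC */
            unsigned int h_nr_runnable;       /* runnable CFS tasks on the rq   */
    };

    /*
     * Return true when an active pull across LLCs should be skipped:
     * every runnable task prefers the source LLC, and the task is either
     * alone on the runqueue or forbidden to move by the cache-aware
     * policy (modeled here by the migration_forbidden flag).
     */
    bool would_break_llc_locality(const struct rq_sketch *src,
                                  bool share_cache,
                                  bool migration_forbidden)
    {
            if (share_cache)
                    return false;   /* same LLC: locality is not at stake */

            if (src->nr_pref_llc_running != src->h_nr_runnable)
                    return false;   /* some tasks do not prefer this LLC */

            if (src->nr_running <= 1)
                    return true;    /* lone task: leave it where it is */

            return migration_forbidden;
    }

The actual patch additionally reads the utilization of the currently
running task via task_util() and lets can_migrate_llc() decide whether
the move is permitted, rather than taking a precomputed flag.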
Co-developed-by: Chen Yu <yu.c.chen@...el.com>
Signed-off-by: Chen Yu <yu.c.chen@...el.com>
Signed-off-by: Tim Chen <tim.c.chen@...ux.intel.com>
---
kernel/sched/fair.c | 62 ++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 59 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bec6354d7841..19ba9c1b9a63 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9826,12 +9826,53 @@ static __maybe_unused enum llc_mig can_migrate_llc_task(int src_cpu, int dst_cpu
return can_migrate_llc(src_cpu, dst_cpu, task_util(p), to_pref);
}
+static inline bool
+break_llc_locality(struct lb_env *env)
+{
+ if (!sched_cache_enabled())
+ return false;
+
+ if (cpus_share_cache(env->src_cpu, env->dst_cpu))
+ return false;
+ /*
+ * All runnable tasks on the source runqueue prefer their current LLC.
+ * Do not pull a task away from its preferred LLC if:
+ * 1. It is the only task running there; OR
+ * 2. Migrating it away from its preferred LLC would violate
+ * the cache-aware scheduling policy.
+ */
+ if (env->src_rq->nr_pref_llc_running == env->src_rq->cfs.h_nr_runnable) {
+ unsigned long util = 0;
+ struct task_struct *cur;
+
+ if (env->src_rq->nr_running <= 1)
+ return true;
+
+ rcu_read_lock();
+ cur = rcu_dereference(env->src_rq->curr);
+ if (cur)
+ util = task_util(cur);
+ rcu_read_unlock();
+
+ if (can_migrate_llc(env->src_cpu, env->dst_cpu,
+ util, false) == mig_forbid)
+ return true;
+ }
+
+ return false;
+}
#else
static inline bool get_llc_stats(int cpu, unsigned long *util,
unsigned long *cap)
{
return false;
}
+
+static inline bool
+break_llc_locality(struct lb_env *env)
+{
+ return false;
+}
#endif
/*
* can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
@@ -12247,6 +12288,9 @@ static int need_active_balance(struct lb_env *env)
{
struct sched_domain *sd = env->sd;
+ if (break_llc_locality(env))
+ return 0;
+
if (asym_active_balance(env))
return 1;
@@ -12266,7 +12310,8 @@ static int need_active_balance(struct lb_env *env)
return 1;
}
- if (env->migration_type == migrate_misfit)
+ if (env->migration_type == migrate_misfit ||
+ env->migration_type == migrate_llc_task)
return 1;
return 0;
@@ -12711,9 +12756,20 @@ static int active_load_balance_cpu_stop(void *data)
goto out_unlock;
/* Is there any task to move? */
- if (busiest_rq->nr_running <= 1)
- goto out_unlock;
+ if (busiest_rq->nr_running <= 1) {
+#ifdef CONFIG_SCHED_CACHE
+ int llc = llc_idx(target_cpu);
+ if (!sched_cache_enabled())
+ goto out_unlock;
+
+ if (llc < 0)
+ goto out_unlock;
+ /* don't migrate if no task prefers target */
+ if (busiest_rq->nr_pref_llc[llc] < 1)
+#endif
+ goto out_unlock;
+ }
/*
* This condition is "impossible", if it occurs
* we need to fix it. Originally reported by
--
2.32.0