Message-Id: <936c261e6283b8fa8c2d7e60493721f6594ce176.1750268218.git.tim.c.chen@linux.intel.com>
Date: Wed, 18 Jun 2025 11:28:01 -0700
From: Tim Chen <tim.c.chen@...ux.intel.com>
To: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
K Prateek Nayak <kprateek.nayak@....com>,
"Gautham R . Shenoy" <gautham.shenoy@....com>
Cc: Tim Chen <tim.c.chen@...ux.intel.com>,
Juri Lelli <juri.lelli@...hat.com>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>,
Mel Gorman <mgorman@...e.de>,
Valentin Schneider <vschneid@...hat.com>,
Tim Chen <tim.c.chen@...el.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Libo Chen <libo.chen@...cle.com>,
Abel Wu <wuyun.abel@...edance.com>,
Madadi Vineeth Reddy <vineethr@...ux.ibm.com>,
Hillf Danton <hdanton@...a.com>,
Len Brown <len.brown@...el.com>,
linux-kernel@...r.kernel.org,
Chen Yu <yu.c.chen@...el.com>
Subject: [RFC patch v3 13/20] sched: Tag the sched group for llc_balance if it has tasks preferring another LLC
During load balancing between LLCs, check whether there are tasks
preferring the destination LLC. If so, balance those tasks to the
destination LLC first.
Tag the sched_group that has tasks preferring to run on other LLCs
(non-local) with the group_llc_balance flag. This way, the load
balancer will later attempt to pull/push these tasks to their
preferred LLCs.
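For illustration only, a later consumer of this flag (not part of this
patch; the helper name and placement are assumed) might favor a tagged
group when picking the busiest group, roughly along these lines:

	/*
	 * Hypothetical sketch, not part of this patch: when choosing the
	 * busiest group, prefer a group tagged group_llc_balance so that
	 * its tasks preferring dst_cpu's LLC are pulled over first.
	 */
	static bool prefer_llc_balance_group(struct sg_lb_stats *busiest,
					     struct sg_lb_stats *sgs)
	{
		if (sgs->group_llc_balance && !busiest->group_llc_balance)
			return true;

		return false;
	}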
Co-developed-by: Chen Yu <yu.c.chen@...el.com>
Signed-off-by: Chen Yu <yu.c.chen@...el.com>
Signed-off-by: Tim Chen <tim.c.chen@...ux.intel.com>
---
kernel/sched/fair.c | 43 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 43 insertions(+)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 99f3cee7b276..48a090c6e885 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10454,6 +10454,7 @@ struct sg_lb_stats {
enum group_type group_type;
unsigned int group_asym_packing; /* Tasks should be moved to preferred CPU */
unsigned int group_smt_balance; /* Task on busy SMT be moved */
+ unsigned int group_llc_balance; /* Tasks should be moved to preferred LLC */
unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */
#ifdef CONFIG_NUMA_BALANCING
unsigned int nr_numa_running;
@@ -10818,6 +10819,43 @@ static inline bool smt_balance(struct lb_env *env, struct sg_lb_stats *sgs,
return false;
}
+/*
+ * Do LLC balance on a sched group that contains an LLC and has tasks
+ * preferring to run on the LLC of the idle dst_cpu.
+ */
+#ifdef CONFIG_SCHED_CACHE
+static inline bool llc_balance(struct lb_env *env, struct sg_lb_stats *sgs,
+ struct sched_group *group)
+{
+ struct sched_domain *child = env->sd->child;
+ int llc;
+
+ if (!sched_feat(SCHED_CACHE))
+ return false;
+
+ if (env->sd->flags & SD_SHARE_LLC)
+ return false;
+
+ /* only care about task migration among LLCs */
+ if (child && !(child->flags & SD_SHARE_LLC))
+ return false;
+
+ llc = llc_idx(env->dst_cpu);
+ if (sgs->nr_pref_llc[llc] > 0 &&
+ _get_migrate_hint(env->src_cpu, env->dst_cpu,
+ 0, true) == mig_allow)
+ return true;
+
+ return false;
+}
+#else
+static inline bool llc_balance(struct lb_env *env, struct sg_lb_stats *sgs,
+ struct sched_group *group)
+{
+ return false;
+}
+#endif
+
static inline long sibling_imbalance(struct lb_env *env,
struct sd_lb_stats *sds,
struct sg_lb_stats *busiest,
@@ -11000,6 +11038,11 @@ static inline void update_sg_lb_stats(struct lb_env *env,
sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);
update_sg_if_llc(env, sgs, group);
+
+ /* Check whether tasks in this group can be moved to their preferred LLC */
+ if (!local_group && llc_balance(env, sgs, group))
+ sgs->group_llc_balance = 1;
+
/* Computing avg_load makes sense only when group is overloaded */
if (sgs->group_type == group_overloaded)
sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
--
2.32.0