Message-ID: <20251204175405.1511340-2-srikar@linux.ibm.com>
Date: Thu, 4 Dec 2025 23:23:49 +0530
From: Srikar Dronamraju <srikar@...ux.ibm.com>
To: linux-kernel@...r.kernel.org, linuxppc-dev@...ts.ozlabs.org,
Peter Zijlstra <peterz@...radead.org>
Cc: Ben Segall <bsegall@...gle.com>,
Christophe Leroy <christophe.leroy@...roup.eu>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Ingo Molnar <mingo@...nel.org>, Juri Lelli <juri.lelli@...hat.com>,
K Prateek Nayak <kprateek.nayak@....com>,
Madhavan Srinivasan <maddy@...ux.ibm.com>,
Mel Gorman <mgorman@...e.de>, Michael Ellerman <mpe@...erman.id.au>,
Nicholas Piggin <npiggin@...il.com>,
Shrikanth Hegde <sshegde@...ux.ibm.com>,
Srikar Dronamraju <srikar@...ux.ibm.com>,
Steven Rostedt <rostedt@...dmis.org>,
Swapnil Sapkal <swapnil.sapkal@....com>,
Thomas Huth <thuth@...hat.com>,
Valentin Schneider <vschneid@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
virtualization@...ts.linux.dev, Yicong Yang <yangyicong@...ilicon.com>,
Ilya Leoshkevich <iii@...ux.ibm.com>
Subject: [PATCH 01/17] sched/fair: Enable group_asym_packing in find_idlest_group
The current scheduler code doesn't handle SD_ASYM_PACKING in the
find_idlest_cpu path. On a few architectures, such as PowerPC, the
cache is shared at the core level, so moving threads across cores can
result in cache misses.

While asym_packing can be enabled above the SMT level, enabling it
across cores could therefore hurt performance. However, if the initial
task placement via find_idlest_cpu takes asym_packing into
consideration, the scheduler can avoid subsequent asym_packing
migrations. This results in fewer migrations, tighter packing and
better overall performance.
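
For reference, the packing decision added here ultimately reduces to
comparing per-CPU asym priorities, along the lines of what
sched_asym_prefer() and the arch_asym_cpu_priority() hook do in the
scheduler core. A minimal user-space sketch of that comparison (the
priority table and CPU numbers below are made up for illustration; on
real systems the priorities come from the architecture code):

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative asym priorities for CPUs 0-3; higher wins. */
	static const int asym_prio[] = { 2, 2, 1, 1 };

	/*
	 * Mirrors the sched_asym_prefer() idea: CPU "a" is a better
	 * packing target than CPU "b" iff it has strictly higher
	 * priority.
	 */
	static bool asym_prefer(int a, int b)
	{
		return asym_prio[a] > asym_prio[b];
	}

	int main(void)
	{
		/* CPU 0 is preferred over CPU 2, but not over CPU 1. */
		printf("prefer(0, 2) = %d\n", asym_prefer(0, 2));
		printf("prefer(0, 1) = %d\n", asym_prefer(0, 1));
		return 0;
	}

With this patch, update_sg_wakeup_stats() tags a candidate group as
group_asym_packing only when this comparison says the group's
asym_prefer_cpu beats the local group's, and
sched_balance_find_dst_group() then prefers that group, so the slow
wakeup path places the task on the preferred core up front rather than
relying on a later load-balance pass to migrate it there.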
Previous version was posted at
https://lore.kernel.org/all/20231018155036.2314342-1-srikar@linux.vnet.ibm.com/t
Signed-off-by: Srikar Dronamraju <srikar@...ux.ibm.com>
---
kernel/sched/fair.c | 33 ++++++++++++++++++++++++++++-----
1 file changed, 28 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5b752324270b..979c3e333fba 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10664,11 +10664,13 @@ static int idle_cpu_without(int cpu, struct task_struct *p)
  * @group: sched_group whose statistics are to be updated.
  * @sgs: variable to hold the statistics for this group.
  * @p: The task for which we look for the idlest group/CPU.
+ * @asym_prefer_cpu: asym_prefer_cpu of the local group
  */
 static inline void update_sg_wakeup_stats(struct sched_domain *sd,
 					  struct sched_group *group,
 					  struct sg_lb_stats *sgs,
-					  struct task_struct *p)
+					  struct task_struct *p,
+					  int asym_prefer_cpu)
 {
 	int i, nr_running;
@@ -10705,6 +10707,12 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd,
 	}
 
+	if (asym_prefer_cpu != READ_ONCE(group->asym_prefer_cpu) &&
+	    sched_asym(sd, READ_ONCE(group->asym_prefer_cpu),
+		       asym_prefer_cpu)) {
+		sgs->group_asym_packing = 1;
+	}
+
 	sgs->group_capacity = group->sgc->capacity;
 
 	sgs->group_weight = group->group_weight;
 
@@ -10721,7 +10729,8 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd,
 				sgs->group_capacity;
 }
 
-static bool update_pick_idlest(struct sched_group *idlest,
+static bool update_pick_idlest(struct sched_domain *sd,
+			       struct sched_group *idlest,
 			       struct sg_lb_stats *idlest_sgs,
 			       struct sched_group *group,
 			       struct sg_lb_stats *sgs)
@@ -10745,8 +10754,11 @@ static bool update_pick_idlest(struct sched_group *idlest,
 			return false;
 		break;
 
-	case group_imbalanced:
 	case group_asym_packing:
+		return sched_asym(sd, READ_ONCE(group->asym_prefer_cpu),
+				  READ_ONCE(idlest->asym_prefer_cpu));
+
+	case group_imbalanced:
 	case group_smt_balance:
 		/* Those types are not used in the slow wakeup path */
 		return false;
@@ -10790,6 +10802,7 @@ sched_balance_find_dst_group(struct sched_domain *sd, struct task_struct *p, int
 			.avg_load = UINT_MAX,
 			.group_type = group_overloaded,
 	};
+	int asym_prefer_cpu;
 
 	do {
 		int local_group;
@@ -10812,10 +10825,12 @@ sched_balance_find_dst_group(struct sched_domain *sd, struct task_struct *p, int
 		} else {
 			sgs = &tmp_sgs;
 		}
+		if (!local || local_group)
+			asym_prefer_cpu = READ_ONCE(group->asym_prefer_cpu);
 
-		update_sg_wakeup_stats(sd, group, sgs, p);
+		update_sg_wakeup_stats(sd, group, sgs, p, asym_prefer_cpu);
 
-		if (!local_group && update_pick_idlest(idlest, &idlest_sgs, group, sgs)) {
+		if (!local_group && update_pick_idlest(sd, idlest, &idlest_sgs, group, sgs)) {
 			idlest = group;
 			idlest_sgs = *sgs;
 		}
@@ -10845,6 +10860,14 @@ sched_balance_find_dst_group(struct sched_domain *sd, struct task_struct *p, int
 	if (local_sgs.group_type > idlest_sgs.group_type)
 		return idlest;
 
+	if (idlest_sgs.group_type == group_asym_packing) {
+		if (sched_asym(sd, READ_ONCE(idlest->asym_prefer_cpu),
+			       READ_ONCE(local->asym_prefer_cpu))) {
+			return idlest;
+		}
+		return NULL;
+	}
+
 	switch (local_sgs.group_type) {
 	case group_overloaded:
 	case group_fully_busy:
--
2.43.7