Message-ID: <20240207034704.935774-3-alexs@kernel.org>
Date: Wed, 7 Feb 2024 11:47:03 +0800
From: alexs@...nel.org
To: Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>,
Daniel Bristot de Oliveira <bristot@...hat.com>,
Valentin Schneider <vschneid@...hat.com>,
linux-kernel@...r.kernel.org,
ricardo.neri-calderon@...ux.intel.com,
yangyicong@...ilicon.com
Cc: Alex Shi <alexs@...nel.org>
Subject: [PATCH v4 3/4] sched/fair: Rework sched_use_asym_prio() and sched_asym_prefer()
From: Alex Shi <alexs@...nel.org>
sched_use_asym_prio() and sched_asym_prefer() are used together in various
places. Consolidate them into a single function sched_asym().

The existing sched_asym() is only used when collecting statistics of a
scheduling group, so rename it to sched_group_asym().

This makes the code easier to read. No functional changes.
Signed-off-by: Alex Shi <alexs@...nel.org>
To: Ricardo Neri <ricardo.neri-calderon@...ux.intel.com>
To: Valentin Schneider <vschneid@...hat.com>
To: Vincent Guittot <vincent.guittot@...aro.org>
To: Peter Zijlstra <peterz@...radead.org>
To: Ingo Molnar <mingo@...hat.com>
---
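A note for anyone following along outside the kernel tree: the sketch below is
a minimal, self-contained illustration of the consolidation described above,
and is not kernel code. The stub predicates use_asym_prio() and asym_prefer()
are hypothetical stand-ins for sched_use_asym_prio() and sched_asym_prefer();
the point is only that folding the two checks into one helper preserves
behavior, because && short-circuits the priority comparison exactly where the
open-coded form bailed out early.

  /* Illustration only, not kernel code; build with: gcc -std=c99 demo.c */
  #include <stdbool.h>
  #include <stdio.h>

  /* Hypothetical stand-ins for sched_use_asym_prio()/sched_asym_prefer(). */
  static bool use_asym_prio(int dst_cpu)            { return dst_cpu % 2 == 0; }
  static bool asym_prefer(int dst_cpu, int src_cpu) { return dst_cpu < src_cpu; }

  /* Before: each call site strings the two checks together by hand. */
  static bool open_coded(int dst_cpu, int src_cpu)
  {
          if (!use_asym_prio(dst_cpu))
                  return false;
          return asym_prefer(dst_cpu, src_cpu);
  }

  /* After: one helper, shaped like the new sched_asym(). */
  static bool consolidated(int dst_cpu, int src_cpu)
  {
          return use_asym_prio(dst_cpu) && asym_prefer(dst_cpu, src_cpu);
  }

  int main(void)
  {
          /* Compare both forms over a small range of fake CPU ids. */
          for (int dst = 0; dst < 4; dst++)
                  for (int src = 0; src < 4; src++)
                          if (open_coded(dst, src) != consolidated(dst, src))
                                  printf("mismatch at dst=%d src=%d\n", dst, src);
          puts("open-coded and consolidated forms agree on all inputs");
          return 0;
  }

The exhaustive comparison is overkill for a two-line helper, but it mirrors how
the three converted call sites below keep their exact semantics.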
 kernel/sched/fair.c | 37 ++++++++++++++++++++-----------------
 1 file changed, 20 insertions(+), 17 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 607dc310b355..942b6358f683 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9746,8 +9746,18 @@ static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
 	return sd->flags & SD_SHARE_CPUCAPACITY || is_core_idle(cpu);
 }
 
+static inline bool sched_asym(struct sched_domain *sd, int dst_cpu, int src_cpu)
+{
+	/*
+	 * First check if @dst_cpu can do asym_packing load balance. Only do it
+	 * if it has higher priority than @src_cpu.
+	 */
+	return sched_use_asym_prio(sd, dst_cpu) &&
+	       sched_asym_prefer(dst_cpu, src_cpu);
+}
+
 /**
- * sched_asym - Check if the destination CPU can do asym_packing load balance
+ * sched_group_asym - Check if the destination CPU can do asym_packing balance
  * @env: The load balancing environment
  * @sgs: Load-balancing statistics of the candidate busiest group
  * @group: The candidate busiest group
@@ -9767,22 +9777,17 @@ static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
  * otherwise.
  */
 static inline bool
-sched_asym(struct lb_env *env, struct sg_lb_stats *sgs, struct sched_group *group)
+sched_group_asym(struct lb_env *env, struct sg_lb_stats *sgs, struct sched_group *group)
 {
-	/* Ensure that the whole local core is idle, if applicable. */
-	if (!sched_use_asym_prio(env->sd, env->dst_cpu))
-		return false;
-
 	/*
-	 * CPU priorities does not make sense for SMT cores with more than one
+	 * CPU priorities do not make sense for SMT cores with more than one
 	 * busy sibling.
 	 */
-	if (group->flags & SD_SHARE_CPUCAPACITY) {
-		if (sgs->group_weight - sgs->idle_cpus != 1)
-			return false;
-	}
+	if ((group->flags & SD_SHARE_CPUCAPACITY) &&
+	    (sgs->group_weight - sgs->idle_cpus != 1))
+		return false;
 
-	return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu);
+	return sched_asym(env->sd, env->dst_cpu, group->asym_prefer_cpu);
 }
 
 /* One group has more than one SMT CPU while the other group does not */
@@ -9938,7 +9943,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	/* Check if dst CPU is idle and preferred to this group */
 	if (!local_group && env->sd->flags & SD_ASYM_PACKING &&
 	    env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
-	    sched_asym(env, sgs, group)) {
+	    sched_group_asym(env, sgs, group)) {
 		sgs->group_asym_packing = 1;
 	}
@@ -11037,8 +11042,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 		 * SMT cores with more than one busy sibling.
 		 */
 		if ((env->sd->flags & SD_ASYM_PACKING) &&
-		    sched_use_asym_prio(env->sd, i) &&
-		    sched_asym_prefer(i, env->dst_cpu) &&
+		    sched_asym(env->sd, i, env->dst_cpu) &&
 		    nr_running == 1)
 			continue;
@@ -11908,8 +11912,7 @@ static void nohz_balancer_kick(struct rq *rq)
 		 * preferred CPU must be idle.
 		 */
 		for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
-			if (sched_use_asym_prio(sd, i) &&
-			    sched_asym_prefer(i, cpu)) {
+			if (sched_asym(sd, i, cpu)) {
 				flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
 				goto unlock;
 			}
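
One more note, since it is the only non-mechanical change in the diff: the
nested if in sched_group_asym() was flattened into a single condition. A
standalone check (illustration only; DEMO_SHARE_CPUCAPACITY is a made-up flag
bit for this demo, not the kernel's SD_SHARE_CPUCAPACITY) shows the two shapes
are equivalent for every input:

  #include <stdbool.h>
  #include <stdio.h>

  #define DEMO_SHARE_CPUCAPACITY 0x1 /* made-up flag bit for the demo */

  /* Old shape: the busy-sibling count is only checked inside the flag test. */
  static bool nested(unsigned int flags, int weight, int idle_cpus)
  {
          if (flags & DEMO_SHARE_CPUCAPACITY) {
                  if (weight - idle_cpus != 1)
                          return false;
          }
          return true;
  }

  /* New shape: the same two tests joined by &&. */
  static bool combined(unsigned int flags, int weight, int idle_cpus)
  {
          if ((flags & DEMO_SHARE_CPUCAPACITY) &&
              (weight - idle_cpus != 1))
                  return false;
          return true;
  }

  int main(void)
  {
          /* Exhaust the small input space: flag on/off, idle_cpus <= weight. */
          for (unsigned int f = 0; f <= 1; f++)
                  for (int w = 1; w <= 8; w++)
                          for (int i = 0; i <= w; i++)
                                  if (nested(f, w, i) != combined(f, w, i))
                                          printf("mismatch: flags=%u weight=%d idle=%d\n",
                                                 f, w, i);
          puts("nested and combined forms agree on all inputs");
          return 0;
  }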
--
2.43.0