Message-Id: <20250217113252.21796-3-huschle@linux.ibm.com>
Date: Mon, 17 Feb 2025 12:32:51 +0100
From: Tobias Huschle <huschle@...ux.ibm.com>
To: linux-kernel@...r.kernel.org
Cc: mingo@...hat.com, peterz@...radead.org, juri.lelli@...hat.com,
vincent.guittot@...aro.org, dietmar.eggemann@....com,
rostedt@...dmis.org, bsegall@...gle.com, mgorman@...e.de,
vschneid@...hat.com, sshegde@...ux.ibm.com,
linuxppc-dev@...ts.ozlabs.org, linux-s390@...r.kernel.org
Subject: [RFC PATCH v2 2/3] sched/fair: adapt scheduler group weight and capacity for parked CPUs
Parked CPUs should not be considered to be available for computation.
This implies that they should also not contribute to the overall weight
of scheduler groups: a large group of parked CPUs should not attempt
to process any tasks, hence a small group of non-parked CPUs should be
considered to have a larger relative weight.
The same consideration holds true for the CPU capacities of such groups.
A group of parked CPUs should not be considered to have any capacity.
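To illustrate the intended accounting, here is a small, self-contained
userspace sketch (not part of the patch; arch_cpu_parked() and
capacity_of() are stand-ins for the kernel helpers used in the diff
below, and the topology values are made up):

	#include <stdio.h>

	/*
	 * Illustrative sketch only: models how the new accounting
	 * excludes parked CPUs from a group's weight and capacity.
	 */
	static int arch_cpu_parked(int cpu)
	{
		return cpu >= 6;	/* pretend CPUs 6 and 7 are parked */
	}

	static unsigned long capacity_of(int cpu)
	{
		(void)cpu;
		return 1024;		/* one SCHED_CAPACITY_SCALE per CPU */
	}

	int main(void)
	{
		unsigned int group_weight = 8, parked_cpus = 0;
		unsigned long group_capacity = 8 * 1024, parked_capacity = 0;

		for (int cpu = 0; cpu < 8; cpu++) {
			parked_cpus     += arch_cpu_parked(cpu);
			parked_capacity += arch_cpu_parked(cpu) * capacity_of(cpu);
		}

		/* Mirror the adjustments made in update_sg_lb_stats() below. */
		group_capacity -= parked_capacity;
		if (!group_capacity)
			group_capacity = 1;	/* never report a zero-capacity group */
		group_weight -= parked_cpus;

		printf("weight=%u capacity=%lu\n", group_weight, group_capacity);
		return 0;
	}

With two of eight CPUs parked this prints "weight=6 capacity=6144",
i.e. load balancing treats the group as a 6-CPU group.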
Signed-off-by: Tobias Huschle <huschle@...ux.ibm.com>
---
kernel/sched/fair.c | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5eb1a3113704..287c6648a41d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9913,6 +9913,8 @@ struct sg_lb_stats {
unsigned int sum_nr_running; /* Nr of all tasks running in the group */
unsigned int sum_h_nr_running; /* Nr of CFS tasks running in the group */
unsigned int sum_nr_parked;
+ unsigned int parked_cpus;
+ unsigned int parked_capacity;
unsigned int idle_cpus; /* Nr of idle CPUs in the group */
unsigned int group_weight;
enum group_type group_type;
@@ -10369,6 +10371,8 @@ static inline void update_sg_lb_stats(struct lb_env *env,
*sg_overutilized = 1;
sgs->sum_nr_parked += arch_cpu_parked(i) * rq->cfs.h_nr_queued;
+ sgs->parked_capacity += arch_cpu_parked(i) * capacity_of(i);
+ sgs->parked_cpus += arch_cpu_parked(i);
/*
* No need to call idle_cpu() if nr_running is not 0
@@ -10406,9 +10410,11 @@ static inline void update_sg_lb_stats(struct lb_env *env,
}
}
- sgs->group_capacity = group->sgc->capacity;
+ sgs->group_capacity = group->sgc->capacity - sgs->parked_capacity;
+ if (!sgs->group_capacity)
+ sgs->group_capacity = 1;
- sgs->group_weight = group->group_weight;
+ sgs->group_weight = group->group_weight - sgs->parked_cpus;
/* Check if dst CPU is idle and preferred to this group */
if (!local_group && env->idle && sgs->sum_h_nr_running &&
@@ -10692,6 +10698,8 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd,
sgs->sum_nr_running += nr_running;
sgs->sum_nr_parked += arch_cpu_parked(i) * rq->cfs.h_nr_queued;
+ sgs->parked_capacity += arch_cpu_parked(i) * capacity_of(i);
+ sgs->parked_cpus += arch_cpu_parked(i);
/*
* No need to call idle_cpu_without() if nr_running is not 0
@@ -10707,9 +10715,11 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd,
}
- sgs->group_capacity = group->sgc->capacity;
+ sgs->group_capacity = group->sgc->capacity - sgs->parked_capacity;
+ if (!sgs->group_capacity)
+ sgs->group_capacity = 1;
- sgs->group_weight = group->group_weight;
+ sgs->group_weight = group->group_weight - sgs->parked_cpus;
sgs->group_type = group_classify(sd->imbalance_pct, group, sgs);
--
2.34.1