Message-ID: <1146ab2f-56c7-43f3-b26b-d91d2bd08556@arm.com>
Date: Thu, 29 Feb 2024 00:34:28 +0100
From: Dietmar Eggemann <dietmar.eggemann@....com>
To: Shrikanth Hegde <sshegde@...ux.ibm.com>,
Pierre Gondois <pierre.gondois@....com>
Cc: yu.c.chen@...el.com, linux-kernel@...r.kernel.org, nysal@...ux.ibm.com,
aboorvad@...ux.ibm.com, srikar@...ux.vnet.ibm.com, vschneid@...hat.com,
morten.rasmussen@....com, qyousef@...alina.io, mingo@...nel.org,
peterz@...radead.org, vincent.guittot@...aro.org
Subject: Re: [PATCH v2 1/2] sched/fair: Add EAS checks before updating
overutilized
On 28/02/2024 18:24, Shrikanth Hegde wrote:
>
>
> On 2/28/24 9:28 PM, Pierre Gondois wrote:
[...]
> But currently we would do some extra computation and then not use it in
> the non-EAS case in update_sg_lb_stats.
>
> Would something like this make sense?
> @@ -9925,7 +9925,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
>  		if (nr_running > 1)
>  			*sg_status |= SG_OVERLOAD;
> 
> -		if (cpu_overutilized(i))
> +		if (sched_energy_enabled() && cpu_overutilized(i))
>  			*sg_status |= SG_OVERUTILIZED;
Yes, we could also disable the setting of OU in load_balance() in the
!EAS case.
[...]
>> NIT:
>> When called from check_update_overutilized_status(),
>> sched_energy_enabled() will be checked twice.
> Yes.
> But I think that's okay since it is just a static branch check.
> This way it keeps the code simpler.
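Right - sched_energy_enabled() boils down to a static key. Roughly its
shape from kernel/sched/sched.h (a sketch from memory, config guards
abbreviated): the check is a runtime-patched nop/jmp rather than a
load+compare, and the !EAS stub lets the compiler drop the guarded code
entirely:

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
DECLARE_STATIC_KEY_FALSE(sched_energy_present);

static inline bool sched_energy_enabled(void)
{
	/* Runtime-patched branch: no load or compare in the hot path. */
	return static_branch_unlikely(&sched_energy_present);
}
#else
static inline bool sched_energy_enabled(void)
{
	return false;
}
#endif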
You could keep the sched_energy_enabled() check outside of the new
set_overutilized_status() to avoid this:
-->8--
---
 kernel/sched/fair.c | 32 ++++++++++++++++++--------------
 1 file changed, 18 insertions(+), 14 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 32bc98d9123d..c82164bf45f3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6676,12 +6676,19 @@ static inline bool cpu_overutilized(int cpu)
 	return !util_fits_cpu(cpu_util_cfs(cpu), rq_util_min, rq_util_max, cpu);
 }
 
+static inline void set_overutilized_status(struct rq *rq, unsigned int val)
+{
+	WRITE_ONCE(rq->rd->overutilized, val);
+	trace_sched_overutilized_tp(rq->rd, val);
+}
+
 static inline void update_overutilized_status(struct rq *rq)
 {
-	if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) {
-		WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED);
-		trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED);
-	}
+	if (!sched_energy_enabled())
+		return;
+
+	if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu))
+		set_overutilized_status(rq, SG_OVERUTILIZED);
 }
 #else
 static inline void update_overutilized_status(struct rq *rq) { }
@@ -10755,19 +10762,16 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 	env->fbq_type = fbq_classify_group(&sds->busiest_stat);
 
 	if (!env->sd->parent) {
-		struct root_domain *rd = env->dst_rq->rd;
-
 		/* update overload indicator if we are at root domain */
-		WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD);
+		WRITE_ONCE(env->dst_rq->rd->overload, sg_status & SG_OVERLOAD);
 
 		/* Update over-utilization (tipping point, U >= 0) indicator */
-		WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED);
-		trace_sched_overutilized_tp(rd, sg_status & SG_OVERUTILIZED);
-	} else if (sg_status & SG_OVERUTILIZED) {
-		struct root_domain *rd = env->dst_rq->rd;
-
-		WRITE_ONCE(rd->overutilized, SG_OVERUTILIZED);
-		trace_sched_overutilized_tp(rd, SG_OVERUTILIZED);
+		if (sched_energy_enabled()) {
+			set_overutilized_status(env->dst_rq,
+						sg_status & SG_OVERUTILIZED);
+		}
+	} else if (sched_energy_enabled() && (sg_status & SG_OVERUTILIZED)) {
+		set_overutilized_status(env->dst_rq, SG_OVERUTILIZED);
 	}
 
 	update_idle_cpu_scan(env, sum_util);
-- 
2.25.1
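
P.S. If someone wants to observe the rd->overutilized transitions this
helper now funnels, the bare tracepoint can be hooked from a module. A
minimal sketch (module boilerplate and the probe name are made up; the
probe proto follows the DECLARE_TRACE() in include/trace/events/sched.h
and relies on the tracepoint being exported by the scheduler):

#include <linux/module.h>
#include <linux/tracepoint.h>
#include <trace/events/sched.h>

/* Probe matching TP_PROTO(struct root_domain *rd, bool overutilized). */
static void ou_probe(void *data, struct root_domain *rd, bool overutilized)
{
	trace_printk("root domain overutilized=%d\n", overutilized);
}

static int __init ou_init(void)
{
	return register_trace_sched_overutilized_tp(ou_probe, NULL);
}

static void __exit ou_exit(void)
{
	unregister_trace_sched_overutilized_tp(ou_probe, NULL);
	/* Wait for in-flight probes before the module text goes away. */
	tracepoint_synchronize_unregister();
}

module_init(ou_init);
module_exit(ou_exit);
MODULE_LICENSE("GPL");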