[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20220721142949.fqmabrjwylkuoltw@wubuntu>
Date: Thu, 21 Jul 2022 15:29:49 +0100
From: Qais Yousef <qais.yousef@....com>
To: Vincent Guittot <vincent.guittot@...aro.org>
Cc: Ingo Molnar <mingo@...nel.org>,
"Peter Zijlstra (Intel)" <peterz@...radead.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
linux-kernel@...r.kernel.org, Xuewen Yan <xuewen.yan94@...il.com>,
Wei Wang <wvw@...gle.com>,
Jonathan JMChen <Jonathan.JMChen@...iatek.com>,
Hank <han.lin@...iatek.com>
Subject: Re: [PATCH 2/7] sched/uclamp: Make task_fits_capacity() use
util_fits_cpu()
On 07/12/22 11:48, Qais Yousef wrote:
> On 07/11/22 15:09, Vincent Guittot wrote:
> > On Wed, 29 Jun 2022 at 21:48, Qais Yousef <qais.yousef@....com> wrote:
>
> [...]
>
> > > @@ -8502,15 +8504,16 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
> > > trace_sched_cpu_capacity_tp(cpu_rq(cpu));
> > >
> > > sdg->sgc->capacity = capacity;
> > > - sdg->sgc->min_capacity = capacity;
> > > - sdg->sgc->max_capacity = capacity;
> > > + sdg->sgc->min_capacity_cpu = cpu;
> > > + sdg->sgc->max_capacity_cpu = cpu;
> >
> > you make these fields useless. There is only one cpu per sched_group
> > at this level, so you don't need to save the cpu number twice for the
> > only cpu of this group
>
> Ah, so we can use group->asym_prefer_cpu then?
>
> I think I got confused and thought we could cover multiple capacity levels
> there.
>
> > > }
> > >
> > > void update_group_capacity(struct sched_domain *sd, int cpu)
> > > {
> > > - struct sched_domain *child = sd->child;
> > > struct sched_group *group, *sdg = sd->groups;
> > > - unsigned long capacity, min_capacity, max_capacity;
> > > + struct sched_domain *child = sd->child;
> > > + int min_capacity_cpu, max_capacity_cpu;
> > > + unsigned long capacity;
> > > unsigned long interval;
> > >
> > > interval = msecs_to_jiffies(sd->balance_interval);
> > > @@ -8523,8 +8526,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
> > > }
> > >
> > > capacity = 0;
> > > - min_capacity = ULONG_MAX;
> > > - max_capacity = 0;
> > > + min_capacity_cpu = max_capacity_cpu = cpu;
> > >
> > > if (child->flags & SD_OVERLAP) {
> > > /*
> > > @@ -8536,29 +8538,44 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
> > > unsigned long cpu_cap = capacity_of(cpu);
> > >
> > > capacity += cpu_cap;
> > > - min_capacity = min(cpu_cap, min_capacity);
> > > - max_capacity = max(cpu_cap, max_capacity);
> > > + if (cpu_cap < capacity_of(min_capacity_cpu))
> > > + min_capacity_cpu = cpu;
> > > +
> > > + if (cpu_cap > capacity_of(max_capacity_cpu))
> > > + max_capacity_cpu = cpu;
> > > }
> > > } else {
> > > /*
> > > * !SD_OVERLAP domains can assume that child groups
> > > * span the current group.
> > > */
> > > + unsigned long min_capacity = ULONG_MAX;
> > > + unsigned long max_capacity = 0;
> > >
> > > group = child->groups;
> > > do {
> > > struct sched_group_capacity *sgc = group->sgc;
> > > + unsigned long cpu_cap_min = capacity_of(sgc->min_capacity_cpu);
> > > + unsigned long cpu_cap_max = capacity_of(sgc->max_capacity_cpu);
> >
> > By replacing sgc->min_capacity with sgc->min_capacity_cpu, the
> > min_capacity is no more stable and can become > max_capacity
>
> Right.
>
> >
> > >
> > > capacity += sgc->capacity;
> > > - min_capacity = min(sgc->min_capacity, min_capacity);
> > > - max_capacity = max(sgc->max_capacity, max_capacity);
> > > + if (cpu_cap_min < min_capacity) {
> > > + min_capacity = cpu_cap_min;
> > > + min_capacity_cpu = sgc->min_capacity_cpu;
> > > + }
> > > +
> > > + if (cpu_cap_max > max_capacity) {
> > > + max_capacity = cpu_cap_max;
> > > + max_capacity_cpu = sgc->max_capacity_cpu;
> > > + }
> > > +
> > > group = group->next;
> > > } while (group != child->groups);
> > > }
> > >
> > > sdg->sgc->capacity = capacity;
> > > - sdg->sgc->min_capacity = min_capacity;
> > > - sdg->sgc->max_capacity = max_capacity;
> > > + sdg->sgc->min_capacity_cpu = min_capacity_cpu;
> > > + sdg->sgc->max_capacity_cpu = max_capacity_cpu;
> > > }
> > >
> > > /*
> > > @@ -8902,7 +8919,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> > > * internally or be covered by avg_load imbalance (eventually).
> > > */
> > > if (sgs->group_type == group_misfit_task &&
> > > - (!capacity_greater(capacity_of(env->dst_cpu), sg->sgc->max_capacity) ||
> > > + (!capacity_greater(env->dst_cpu, sg->sgc->max_capacity_cpu) ||
> > > sds->local_stat.group_type != group_has_spare))
> > > return false;
> > >
> > > @@ -8986,7 +9003,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
> > > */
> > > if ((env->sd->flags & SD_ASYM_CPUCAPACITY) &&
> > > (sgs->group_type <= group_fully_busy) &&
> > > - (capacity_greater(sg->sgc->min_capacity, capacity_of(env->dst_cpu))))
> > > + (capacity_greater(sg->sgc->min_capacity_cpu, env->dst_cpu)))
> > > return false;
> > >
> > > return true;
> > > @@ -9108,7 +9125,7 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd,
> > >
> > > /* Check if task fits in the group */
> > > if (sd->flags & SD_ASYM_CPUCAPACITY &&
> > > - !task_fits_capacity(p, group->sgc->max_capacity)) {
> > > + !task_fits_cpu(p, group->sgc->max_capacity_cpu)) {
> >
> > All the changes and added complexity above for this line. Can't you
> > find another way ?
>
> You're right, I might have got carried away trying to keep the logic the same.
>
> Can we use group->asym_prefer_cpu or pick a cpu from group->sgc->cpumask
> instead?
>
> I'll dig more into it anyway and try to come up with simpler alternative.
Actually we can't.
I can keep the current {max,min}_capacity fields and just add the new
{max,min}_capacity_cpu fields, using them only where needed. Would that
address your concerns? That was actually the first version of the code, but
then it seemed redundant to keep both {max,min}_capacity and
{max,min}_capacity_cpu.
OR
I can add a new function to search for max spare capacity cpu in the group.
Preference?
Thanks!
--
Qais Yousef
Powered by blists - more mailing lists