Message-ID: <20200207050854.GF27398@codeaurora.org>
Date: Fri, 7 Feb 2020 10:38:54 +0530
From: Pavan Kondeti <pkondeti@...eaurora.org>
To: Valentin Schneider <valentin.schneider@....com>
Cc: linux-kernel@...r.kernel.org, mingo@...hat.com,
peterz@...radead.org, vincent.guittot@...aro.org,
dietmar.eggemann@....com, morten.rasmussen@....com,
qperret@...gle.com, adharmap@...eaurora.org
Subject: Re: [PATCH v4 1/4] sched/fair: Add asymmetric CPU capacity wakeup scan

On Thu, Feb 06, 2020 at 07:19:54PM +0000, Valentin Schneider wrote:
> From: Morten Rasmussen <morten.rasmussen@....com>
>
<snip>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index fe4e0d7753756..9a5a6e9d2375e 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -5894,6 +5894,40 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
> return cpu;
> }
>
> +/*
> + * Scan the asym_capacity domain for idle CPUs; pick the first idle one on which
> + * the task fits. If no CPU is big enough, but there are idle ones, try to
> + * maximize capacity.
> + */
> +static int
> +select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
> +{
> + unsigned long best_cap = 0;
> + int cpu, best_cpu = -1;
> + struct cpumask *cpus;
> +
> + sync_entity_load_avg(&p->se);
> +
> + cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
> + cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
> +
> + for_each_cpu_wrap(cpu, cpus, target) {
> + unsigned long cpu_cap = capacity_of(cpu);
> +
> + if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu))
> + continue;
> + if (task_fits_capacity(p, cpu_cap))
> + return cpu;
> +
> + if (cpu_cap > best_cap) {
> + best_cap = cpu_cap;
> + best_cpu = cpu;
> + }
> + }
> +
> + return best_cpu;
> +}
> +
> /*
> * Try and locate an idle core/thread in the LLC cache domain.
> */
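
For anyone reading along: task_fits_capacity() here boils down to the
usual ~20% headroom check via fits_capacity(). A minimal sketch from my
reading of fair.c in this tree (not the exact code; depending on the
version, the utilization may additionally be clamped by uclamp):

	/*
	 * Sketch: a task "fits" a CPU if its estimated utilization leaves
	 * roughly 20% headroom, i.e. util * 1.25 < capacity, computed in
	 * fixed point against SCHED_CAPACITY_SCALE (1024).
	 */
	#define fits_capacity(cap, max)	((cap) * 1280 < (max) * 1024)

	static inline int task_fits_capacity(struct task_struct *p,
					     long capacity)
	{
		return fits_capacity(task_util_est(p), capacity);
	}

So e.g. a task with util ~350 fails the check on a little CPU of
capacity 404 (350 * 1280 = 448000 >= 404 * 1024 = 413696) but passes on
a big CPU of capacity 1024, which is why the loop above keeps scanning
and, failing that, settles for the highest-capacity idle CPU.
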
> @@ -5902,6 +5936,28 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
> struct sched_domain *sd;
> int i, recent_used_cpu;
>
> + /*
> + * For asymmetric CPU capacity systems, our domain of interest is
> + * sd_asym_cpucapacity rather than sd_llc.
> + */
> + if (static_branch_unlikely(&sched_asym_cpucapacity)) {
> + sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target));
> + /*
> + * On an asymmetric CPU capacity system where an exclusive
> + * cpuset defines a symmetric island (i.e. one unique
> + * capacity_orig value through the cpuset), the key will be set
> + * but the CPUs within that cpuset will not have a domain with
> + * SD_ASYM_CPUCAPACITY. These should follow the usual symmetric
> + * capacity path.
> + */
> + if (!sd)
> + goto symmetric;
> +
> + i = select_idle_capacity(p, sd, target);
> + return ((unsigned)i < nr_cpumask_bits) ? i : target;
> + }
> +
> +symmetric:
> if (available_idle_cpu(target) || sched_idle_cpu(target))
> return target;
>
> --
> 2.24.0
>
Looks good to me.
Thanks,
Pavan
--
Qualcomm India Private Limited, on behalf of Qualcomm Innovation Center, Inc.
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum, a Linux Foundation Collaborative Project.