Message-ID: <20200522145827.GD600689@localhost.localdomain>
Date: Fri, 22 May 2020 16:58:27 +0200
From: Juri Lelli <juri.lelli@...hat.com>
To: Dietmar Eggemann <dietmar.eggemann@....com>
Cc: Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Vincent Guittot <vincent.guittot@...aro.org>,
Steven Rostedt <rostedt@...dmis.org>,
Luca Abeni <luca.abeni@...tannapisa.it>,
Daniel Bristot de Oliveira <bristot@...hat.com>,
Wei Wang <wvw@...gle.com>, Quentin Perret <qperret@...gle.com>,
Alessio Balsini <balsini@...gle.com>,
Pavan Kondeti <pkondeti@...eaurora.org>,
Patrick Bellasi <patrick.bellasi@...bug.net>,
Morten Rasmussen <morten.rasmussen@....com>,
Valentin Schneider <valentin.schneider@....com>,
Qais Yousef <qais.yousef@....com>, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v3 3/5] sched/deadline: Improve admission control for
asymmetric CPU capacities
On 20/05/20 15:42, Dietmar Eggemann wrote:
> From: Luca Abeni <luca.abeni@...tannapisa.it>
>
> The current SCHED_DEADLINE (DL) admission control ensures that
>
> sum of reserved CPU bandwidth < x * M
>
> where
>
> x = /proc/sys/kernel/sched_rt_{runtime,period}_us
> M = # CPUs in root domain.
>
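For illustration (the numbers below are just the sysctl defaults, not
something from the patch): with sched_rt_runtime_us = 950000 and
sched_rt_period_us = 1000000, x = 0.95, so a root domain with M = 4 CPUs
admits DL reservations up to 0.95 * 4 = 3.8 CPUs' worth of bandwidth.
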
> DL admission control works well for homogeneous systems where the
> capacity of all CPUs is equal (1024), i.e. bounded tardiness for DL
> tasks and non-starvation of non-DL tasks are guaranteed.
>
> But on heterogeneous systems, where the capacities of the CPUs differ,
> it can fail by over-allocating CPU time on smaller-capacity CPUs.
>
> On an Arm big.LITTLE/DynamIQ system, DL tasks can easily starve other
> tasks, making the system unusable.
>
> Fix this by explicitly considering the CPU capacity in the DL admission
> test by replacing M with the root domain CPU capacity sum.
>
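For context (sketch only; the capacity-sum helper comes from an earlier
patch in the series, so the body below is my assumption of what it
computes, not the actual code):

	static unsigned long dl_bw_capacity_sketch(int cpu)
	{
		struct root_domain *rd = cpu_rq(cpu)->rd;
		unsigned long cap = 0;
		int i;

		/* sum the per-CPU capacities over the active rd span */
		for_each_cpu_and(i, rd->span, cpu_active_mask)
			cap += arch_scale_cpu_capacity(i);

		return cap;
	}

i.e. SCHED_CAPACITY_SCALE (1024) per big CPU and proportionally less
per LITTLE CPU.
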
> Signed-off-by: Luca Abeni <luca.abeni@...tannapisa.it>
> Signed-off-by: Dietmar Eggemann <dietmar.eggemann@....com>
> ---
> kernel/sched/deadline.c | 30 +++++++++++++++++-------------
> kernel/sched/sched.h | 6 +++---
> 2 files changed, 20 insertions(+), 16 deletions(-)
>
> diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
> index ea7282ce484c..fa8566517715 100644
> --- a/kernel/sched/deadline.c
> +++ b/kernel/sched/deadline.c
> @@ -2590,11 +2590,12 @@ void sched_dl_do_global(void)
> int sched_dl_overflow(struct task_struct *p, int policy,
> const struct sched_attr *attr)
> {
> - struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
> u64 period = attr->sched_period ?: attr->sched_deadline;
> u64 runtime = attr->sched_runtime;
> u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
> - int cpus, err = -1;
> + int cpus, err = -1, cpu = task_cpu(p);
> + struct dl_bw *dl_b = dl_bw_of(cpu);
> + unsigned long cap;
>
> if (attr->sched_flags & SCHED_FLAG_SUGOV)
> return 0;
> @@ -2609,15 +2610,17 @@ int sched_dl_overflow(struct task_struct *p, int policy,
> * allocated bandwidth of the container.
> */
> raw_spin_lock(&dl_b->lock);
> - cpus = dl_bw_cpus(task_cpu(p));
> + cpus = dl_bw_cpus(cpu);
> + cap = dl_bw_capacity(cpu);
> +
> if (dl_policy(policy) && !task_has_dl_policy(p) &&
> - !__dl_overflow(dl_b, cpus, 0, new_bw)) {
> + !__dl_overflow(dl_b, cap, 0, new_bw)) {
> if (hrtimer_active(&p->dl.inactive_timer))
> __dl_sub(dl_b, p->dl.dl_bw, cpus);
> __dl_add(dl_b, new_bw, cpus);
> err = 0;
> } else if (dl_policy(policy) && task_has_dl_policy(p) &&
> - !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
> + !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
> /*
> * XXX this is slightly incorrect: when the task
> * utilization decreases, we should delay the total
> @@ -2753,19 +2756,19 @@ bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
> #ifdef CONFIG_SMP
> int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
> {
> + unsigned long flags, cap;
> unsigned int dest_cpu;
> struct dl_bw *dl_b;
> bool overflow;
> - int cpus, ret;
> - unsigned long flags;
> + int ret;
>
> dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
>
> rcu_read_lock_sched();
> dl_b = dl_bw_of(dest_cpu);
> raw_spin_lock_irqsave(&dl_b->lock, flags);
> - cpus = dl_bw_cpus(dest_cpu);
> - overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
> + cap = dl_bw_capacity(dest_cpu);
> + overflow = __dl_overflow(dl_b, cap, 0, p->dl.dl_bw);
> if (overflow) {
> ret = -EBUSY;
> } else {
> @@ -2775,6 +2778,8 @@ int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allo
> * We will free resources in the source root_domain
> * later on (see set_cpus_allowed_dl()).
> */
> + int cpus = dl_bw_cpus(dest_cpu);
> +
> __dl_add(dl_b, p->dl.dl_bw, cpus);
> ret = 0;
> }
> @@ -2807,16 +2812,15 @@ int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
>
> bool dl_cpu_busy(unsigned int cpu)
> {
> - unsigned long flags;
> + unsigned long flags, cap;
> struct dl_bw *dl_b;
> bool overflow;
> - int cpus;
>
> rcu_read_lock_sched();
> dl_b = dl_bw_of(cpu);
> raw_spin_lock_irqsave(&dl_b->lock, flags);
> - cpus = dl_bw_cpus(cpu);
> - overflow = __dl_overflow(dl_b, cpus, 0, 0);
> + cap = dl_bw_capacity(cpu);
> + overflow = __dl_overflow(dl_b, cap, 0, 0);
> raw_spin_unlock_irqrestore(&dl_b->lock, flags);
> rcu_read_unlock_sched();
>
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index 21416b30c520..14cb6a97e2d2 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -310,11 +310,11 @@ void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
> __dl_update(dl_b, -((s32)tsk_bw / cpus));
> }
>
> -static inline
> -bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
> +static inline bool __dl_overflow(struct dl_bw *dl_b, unsigned long cap,
> + u64 old_bw, u64 new_bw)
> {
> return dl_b->bw != -1 &&
> - dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
> + cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
> }
>
> extern void init_dl_bw(struct dl_bw *dl_b);
> --
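If I read cap_scale() right (the usual (v * cap) >> SCHED_CAPACITY_SHIFT
helper), the bound effectively goes from "bw * nr_cpus" to
"bw * capacity_sum / 1024". Purely illustrative numbers below (the
LITTLE capacity of 446 is made up):

	cap       = 4 * 1024 + 4 * 446      /* = 5880          */
	old bound = dl_b->bw * 8            /* 8 "full" CPUs   */
	new bound = (dl_b->bw * 5880) >> 10 /* ~5.74 CPUs      */

which matches the intent of the patch.
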
Acked-by: Juri Lelli <juri.lelli@...hat.com>