Message-Id: <1483097591-3871-4-git-send-email-lucabe72@gmail.com>
Date: Fri, 30 Dec 2016 12:33:08 +0100
From: Luca Abeni <luca.abeni@...tn.it>
To: linux-kernel@...r.kernel.org
Cc: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Juri Lelli <juri.lelli@....com>,
Claudio Scordino <claudio@...dence.eu.com>,
Steven Rostedt <rostedt@...dmis.org>,
Tommaso Cucinotta <tommaso.cucinotta@...up.it>,
Daniel Bristot de Oliveira <bristot@...hat.com>,
Luca Abeni <luca.abeni@...tn.it>
Subject: [RFC v4 3/6] sched/deadline: fix the update of the total -deadline utilization
From: Luca Abeni <luca.abeni@...tn.it>
Now that the inactive timer can be armed to fire at the 0-lag time,
it is possible to use inactive_task_timer() to update the total
-deadline utilization (dl_b->total_bw) at the correct time, fixing
dl_overflow() and __setparam_dl().
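For reference, the 0-lag time of a -deadline task is the instant at which
its remaining runtime, consumed at the reserved bandwidth
dl_runtime / dl_period, brings the task's lag to zero:
	zerolag = deadline - runtime * dl_period / dl_runtime
For example (with illustrative numbers), a task with dl_runtime = 10ms and
dl_period = 100ms that blocks with 4ms of residual runtime and 60ms to its
deadline reaches its 0-lag time 60 - 4 * 100 / 10 = 20ms in the future;
its bandwidth has to stay in dl_b->total_bw for those 20ms.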
Signed-off-by: Luca Abeni <luca.abeni@...tn.it>
---
kernel/sched/core.c | 36 ++++++++++++------------------------
kernel/sched/deadline.c | 32 +++++++++++++++++++++++---------
2 files changed, 35 insertions(+), 33 deletions(-)
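Note for reviewers: below is a minimal sketch of how the 0-lag instant is
computed and used to arm the inactive timer, modelled on task_go_inactive()
from earlier in this series; the exact code there may differ slightly, and
the div64_long() / rq_clock() / hrtimer_start() usage is my assumption of
the standard helpers:

	s64 zerolag_time;

	/*
	 * Absolute 0-lag time of the deadline entity: its residual
	 * runtime, consumed at the bandwidth dl_runtime / dl_period,
	 * reaches zero lag at this instant.
	 */
	zerolag_time = dl_se->deadline -
		div64_long(dl_se->runtime * dl_se->dl_period,
			   dl_se->dl_runtime);

	/* Make it relative to the current rq clock... */
	zerolag_time -= rq_clock(rq);

	/*
	 * ...so that a negative value means the 0-lag time has already
	 * passed and the utilization can be released immediately;
	 * otherwise, arm the inactive timer to fire at that instant.
	 */
	if (zerolag_time < 0)
		sub_running_bw(dl_se, dl_rq);
	else
		hrtimer_start(&dl_se->inactive_timer,
			      ns_to_ktime(zerolag_time),
			      HRTIMER_MODE_REL);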
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 98f9944..5030b3c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2509,9 +2509,6 @@ static inline int dl_bw_cpus(int i)
* allocated bandwidth to reflect the new situation.
*
* This function is called while holding p's rq->lock.
- *
- * XXX we should delay bw change until the task's 0-lag point, see
- * __setparam_dl().
*/
static int dl_overflow(struct task_struct *p, int policy,
const struct sched_attr *attr)
@@ -2540,11 +2537,22 @@ static int dl_overflow(struct task_struct *p, int policy,
err = 0;
} else if (dl_policy(policy) && task_has_dl_policy(p) &&
!__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
+ /*
+ * XXX this is slightly incorrect: when the task's
+ * utilization decreases, we should delay the total
+ * utilization change until the task's 0-lag point.
+ * But this would require arming the task's "inactive
+ * timer" even when the task is not inactive.
+ */
__dl_clear(dl_b, p->dl.dl_bw);
__dl_add(dl_b, new_bw);
err = 0;
} else if (!dl_policy(policy) && task_has_dl_policy(p)) {
- __dl_clear(dl_b, p->dl.dl_bw);
+ /*
+ * Do not decrease the total deadline utilization here;
+ * switched_from_dl() will take care of doing it at the
+ * correct (0-lag) time.
+ */
err = 0;
}
raw_spin_unlock(&dl_b->lock);
@@ -3914,26 +3922,6 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
dl_se->flags = attr->sched_flags;
dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
-
- /*
- * Changing the parameters of a task is 'tricky' and we're not doing
- * the correct thing -- also see task_dead_dl() and switched_from_dl().
- *
- * What we SHOULD do is delay the bandwidth release until the 0-lag
- * point. This would include retaining the task_struct until that time
- * and change dl_overflow() to not immediately decrement the current
- * amount.
- *
- * Instead we retain the current runtime/deadline and let the new
- * parameters take effect after the current reservation period lapses.
- * This is safe (albeit pessimistic) because the 0-lag point is always
- * before the current scheduling deadline.
- *
- * We can still have temporary overloads because we do not delay the
- * change in bandwidth until that time; so admission control is
- * not on the safe side. It does however guarantee tasks will never
- * consume more than promised.
- */
}
/*
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index cdb7274..c087c3d 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -94,8 +94,14 @@ static void task_go_inactive(struct task_struct *p)
*/
if (zerolag_time < 0) {
sub_running_bw(dl_se, dl_rq);
- if (!dl_task(p))
+ if (!dl_task(p)) {
+ struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
+
+ raw_spin_lock(&dl_b->lock);
+ __dl_clear(dl_b, p->dl.dl_bw);
__dl_clear_params(p);
+ raw_spin_unlock(&dl_b->lock);
+ }
return;
}
@@ -850,9 +856,14 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
rq = task_rq_lock(p, &rf);
if (!dl_task(p) || p->state == TASK_DEAD) {
+ struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
+
if (p->state == TASK_DEAD && dl_se->dl_non_contending)
sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
+ raw_spin_lock(&dl_b->lock);
+ __dl_clear(dl_b, p->dl.dl_bw);
+ raw_spin_unlock(&dl_b->lock);
__dl_clear_params(p);
goto unlock;
@@ -1375,15 +1386,18 @@ static void task_fork_dl(struct task_struct *p)
static void task_dead_dl(struct task_struct *p)
{
- struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
+ if (!hrtimer_active(&p->dl.inactive_timer)) {
+ struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
- /*
- * Since we are TASK_DEAD we won't slip out of the domain!
- */
- raw_spin_lock_irq(&dl_b->lock);
- /* XXX we should retain the bw until 0-lag */
- dl_b->total_bw -= p->dl.dl_bw;
- raw_spin_unlock_irq(&dl_b->lock);
+ /*
+ * If the "inactive timer" is not active, the 0-lag time
+ * has already passed, so we immediately decrease the
+ * total deadline utilization.
+ */
+ raw_spin_lock_irq(&dl_b->lock);
+ __dl_clear(dl_b, p->dl.dl_bw);
+ raw_spin_unlock_irq(&dl_b->lock);
+ }
}
static void set_curr_task_dl(struct rq *rq)
--
2.7.4
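As a userspace illustration of the case the dl_overflow() hunk above
handles (a task leaving SCHED_DEADLINE, whose utilization must now be
released at the 0-lag time rather than immediately), here is a minimal
sketch. It needs root (or CAP_SYS_NICE); the raw syscall wrapper, the
SCHED_DEADLINE fallback define and the struct layout follow the usual
sched_setattr(2) boilerplate, since libc provides no wrapper here:

	/* gcc -o dl-switch dl-switch.c */
	#define _GNU_SOURCE
	#include <sched.h>		/* SCHED_OTHER */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	#ifndef SCHED_DEADLINE
	#define SCHED_DEADLINE	6
	#endif

	struct sched_attr {
		uint32_t size;
		uint32_t sched_policy;
		uint64_t sched_flags;
		int32_t  sched_nice;
		uint32_t sched_priority;	/* SCHED_FIFO/RR only */
		uint64_t sched_runtime;		/* SCHED_DEADLINE, in ns */
		uint64_t sched_deadline;
		uint64_t sched_period;
	};

	/* Raw wrapper, to avoid clashing with any libc-provided one. */
	static int sched_setattr_syscall(pid_t pid,
					 const struct sched_attr *attr,
					 unsigned int flags)
	{
		return syscall(SYS_sched_setattr, pid, attr, flags);
	}

	int main(void)
	{
		struct sched_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.sched_policy   = SCHED_DEADLINE;
		attr.sched_runtime  = 10ULL * 1000 * 1000;	/*  10ms */
		attr.sched_deadline = 100ULL * 1000 * 1000;	/* 100ms */
		attr.sched_period   = 100ULL * 1000 * 1000;	/* 100ms */
		if (sched_setattr_syscall(0, &attr, 0)) {
			perror("sched_setattr(SCHED_DEADLINE)");
			return 1;
		}

		usleep(50 * 1000);	/* consume some of the reservation */

		/*
		 * Leave SCHED_DEADLINE: with this patch the task's
		 * utilization stays in dl_b->total_bw until the 0-lag
		 * time, released by switched_from_dl() or
		 * inactive_task_timer(), not by dl_overflow() here.
		 */
		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.sched_policy = SCHED_OTHER;
		if (sched_setattr_syscall(0, &attr, 0)) {
			perror("sched_setattr(SCHED_OTHER)");
			return 1;
		}
		return 0;
	}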