Message-Id: <1452785094-3086-2-git-send-email-luca.abeni@unitn.it>
Date: Thu, 14 Jan 2016 16:24:46 +0100
From: Luca Abeni <luca.abeni@...tn.it>
To: linux-kernel@...r.kernel.org
Cc: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Juri Lelli <juri.lelli@....com>,
Luca Abeni <luca.abeni@...tn.it>
Subject: [RFC 1/8] Track the active utilisation

The active utilisation here is defined as the total utilisation of the
active (TASK_RUNNING) tasks queued on a runqueue. Hence, it is
increased when a task wakes up and decreased when a task blocks.
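
For reference, dl_bw is the task's bandwidth in 20-bit fixed point,
computed by to_ratio() in kernel/sched/core.c from the task's runtime
and (relative) deadline. A minimal userspace sketch of that conversion
(illustrative names only, not kernel code):

        /* Illustrative only: bandwidth as a 20-bit fixed-point ratio. */
        #include <stdio.h>
        #include <stdint.h>

        #define BW_SHIFT 20

        static uint64_t to_ratio_sketch(uint64_t period, uint64_t runtime)
        {
                if (period == 0)
                        return 0;
                return (runtime << BW_SHIFT) / period;
        }

        int main(void)
        {
                /* runtime = 10ms, period = 100ms -> utilisation ~0.1 */
                uint64_t bw = to_ratio_sketch(100000000ULL, 10000000ULL);

                printf("dl_bw = %llu (~%.3f)\n", (unsigned long long)bw,
                       (double)bw / (double)(1 << BW_SHIFT));
                return 0;
        }

With these units, the active utilisation is simply the sum of such
dl_bw values over the TASK_RUNNING deadline tasks on the runqueue.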
This might need to be fixed or improved by decreasing the active
utilisation at the so-called "0-lag time" instead of immediately when
the task blocks.
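
For reference, one common definition of the 0-lag time of a task with
remaining runtime `runtime', reservation period `dl_period' and
absolute deadline `deadline' is (a sketch of the idea, not something
this patch implements):

        zerolag = deadline - (runtime * dl_period) / dl_runtime

i.e. the deadline minus the time needed to consume the remaining
runtime at the task's reserved bandwidth dl_runtime / dl_period. Up to
that instant a blocked task could still, in theory, use its whole
utilisation, which is why dropping the bandwidth already at block time
can be too aggressive for reclaiming schemes.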
Signed-off-by: Juri Lelli <juri.lelli@....com>
---
 kernel/sched/deadline.c | 36 +++++++++++++++++++++++++++++++++++-
 kernel/sched/sched.h    |  5 +++++
 2 files changed, 40 insertions(+), 1 deletion(-)
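
To make the bookkeeping below easier to follow, here is a stand-alone
userspace model of it (all names are made up for illustration; the
patch itself hooks these updates into the enqueue/dequeue and
task-lifetime paths):

        /* Illustrative model only; not kernel code. */
        #include <assert.h>
        #include <stdint.h>
        #include <stdio.h>

        struct dl_rq_model {
                int64_t running_bw;     /* sum of dl_bw of TASK_RUNNING tasks */
        };

        static void add_running_bw_model(struct dl_rq_model *dl_rq, uint64_t se_bw)
        {
                /* A deadline task woke up: its bandwidth becomes active. */
                dl_rq->running_bw += se_bw;
        }

        static void clear_running_bw_model(struct dl_rq_model *dl_rq, uint64_t se_bw)
        {
                /* A deadline task blocked: its bandwidth is no longer active. */
                dl_rq->running_bw -= se_bw;
                if (dl_rq->running_bw < 0) {
                        /* Underflow means the accounting is buggy: warn and clamp. */
                        fprintf(stderr, "running_bw underflow\n");
                        dl_rq->running_bw = 0;
                }
        }

        int main(void)
        {
                struct dl_rq_model rq = { 0 };
                uint64_t bw_a = 104857;         /* ~0.1 in 20-bit fixed point */
                uint64_t bw_b = 209715;         /* ~0.2 */

                add_running_bw_model(&rq, bw_a);        /* task A wakes up */
                add_running_bw_model(&rq, bw_b);        /* task B wakes up */
                clear_running_bw_model(&rq, bw_a);      /* task A blocks */

                assert(rq.running_bw == (int64_t)bw_b);
                printf("running_bw = %lld\n", (long long)rq.running_bw);
                return 0;
        }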
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index cd64c97..e779cce 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -43,6 +43,24 @@ static inline int on_dl_rq(struct sched_dl_entity *dl_se)
         return !RB_EMPTY_NODE(&dl_se->rb_node);
 }
 
+static void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
+{
+        u64 se_bw = dl_se->dl_bw;
+
+        dl_rq->running_bw += se_bw;
+}
+
+static void clear_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
+{
+        u64 se_bw = dl_se->dl_bw;
+
+        dl_rq->running_bw -= se_bw;
+        if (dl_rq->running_bw < 0) {
+                WARN_ON(1);
+                dl_rq->running_bw = 0;
+        }
+}
+
 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
 {
         struct sched_dl_entity *dl_se = &p->dl;
@@ -500,6 +518,8 @@ static void update_dl_entity(struct sched_dl_entity *dl_se,
         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
         struct rq *rq = rq_of_dl_rq(dl_rq);
 
+        add_running_bw(dl_se, dl_rq);
+
         /*
          * The arrival of a new instance needs special treatment, i.e.,
          * the actual scheduling parameters have to be "renewed".
@@ -961,8 +981,10 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
          * its rq, the bandwidth timer callback (which clearly has not
          * run yet) will take care of this.
          */
-        if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
+        if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
+                add_running_bw(&p->dl, &rq->dl);
                 return;
+        }
 
         enqueue_dl_entity(&p->dl, pi_se, flags);
 
@@ -980,6 +1002,8 @@ static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 {
         update_curr_dl(rq);
         __dequeue_task_dl(rq, p, flags);
+        if (flags & DEQUEUE_SLEEP)
+                clear_running_bw(&p->dl, &rq->dl);
 }
 
 /*
@@ -1218,6 +1242,8 @@ static void task_fork_dl(struct task_struct *p)
 static void task_dead_dl(struct task_struct *p)
 {
         struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
+        struct dl_rq *dl_rq = dl_rq_of_se(&p->dl);
+        struct rq *rq = rq_of_dl_rq(dl_rq);
 
         /*
          * Since we are TASK_DEAD we won't slip out of the domain!
@@ -1226,6 +1252,10 @@ static void task_dead_dl(struct task_struct *p)
         /* XXX we should retain the bw until 0-lag */
         dl_b->total_bw -= p->dl.dl_bw;
         raw_spin_unlock_irq(&dl_b->lock);
+
+        if (task_on_rq_queued(p)) {
+                clear_running_bw(&p->dl, &rq->dl);
+        }
 }
 
 static void set_curr_task_dl(struct rq *rq)
@@ -1705,6 +1735,10 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
         if (!start_dl_timer(p))
                 __dl_clear_params(p);
 
+        if (task_on_rq_queued(p)) {
+                clear_running_bw(&p->dl, &rq->dl);
+        }
+
         /*
          * Since this might be the only -deadline task on the rq,
          * this is the right place to try to pull some other one
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 10f1637..826ca6a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -519,6 +519,11 @@ struct dl_rq {
 #else
         struct dl_bw dl_bw;
 #endif
+        /* This is the "active utilization" for this runqueue.
+         * Increased when a task wakes up (becomes TASK_RUNNING)
+         * and decreased when a task blocks
+         */
+        s64 running_bw;
 };
 
 #ifdef CONFIG_SMP
--
1.9.1