lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:	Fri, 29 Oct 2010 08:33:10 +0200
From:	Raistlin <raistlin@...ux.it>
To:	Peter Zijlstra <peterz@...radead.org>
Cc:	Ingo Molnar <mingo@...e.hu>, Thomas Gleixner <tglx@...utronix.de>,
	Steven Rostedt <rostedt@...dmis.org>,
	Chris Friesen <cfriesen@...tel.com>, oleg@...hat.com,
	Frederic Weisbecker <fweisbec@...il.com>,
	Darren Hart <darren@...art.com>,
	Johan Eker <johan.eker@...csson.com>,
	"p.faure" <p.faure@...tech.ch>,
	linux-kernel <linux-kernel@...r.kernel.org>,
	Claudio Scordino <claudio@...dence.eu.com>,
	michael trimarchi <trimarchi@...is.sssup.it>,
	Fabio Checconi <fabio@...dalf.sssup.it>,
	Tommaso Cucinotta <cucinotta@...up.it>,
	Juri Lelli <juri.lelli@...il.com>,
	Nicola Manica <nicola.manica@...i.unitn.it>,
	Luca Abeni <luca.abeni@...tn.it>,
	Dhaval Giani <dhaval@...is.sssup.it>,
	Harald Gustafsson <hgu1972@...il.com>,
	paulmck <paulmck@...ux.vnet.ibm.com>
Subject: [RFC][PATCH 08/22] sched: SCHED_DEADLINE avg_update accounting


Make the core scheduler and load balancer aware of the load
produced by -deadline tasks, by updating the moving average
in the same way as is done for sched_rt tasks.

Signed-off-by: Dario Faggioli <raistlin@...ux.it>
---
 kernel/sched.c      |   13 ++++++++++++-
 kernel/sched_dl.c   |    2 ++
 kernel/sched_fair.c |    4 ++--
 3 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index 79e7c1c..7f0780c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -571,7 +571,7 @@ struct rq {
 
 	unsigned long avg_load_per_task;
 
-	u64 rt_avg;
+	u64 dl_avg, rt_avg;
 	u64 age_stamp;
 	u64 idle_stamp;
 	u64 avg_idle;
@@ -1346,10 +1346,17 @@ static void sched_avg_update(struct rq *rq)
 		 */
 		asm("" : "+rm" (rq->age_stamp));
 		rq->age_stamp += period;
+		rq->dl_avg /= 2;
 		rq->rt_avg /= 2;
 	}
 }
 
+static void sched_dl_avg_update(struct rq *rq, u64 dl_delta)
+{
+	rq->dl_avg += dl_delta;
+	sched_avg_update(rq);
+}
+
 static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 {
 	rq->rt_avg += rt_delta;
@@ -1363,6 +1370,10 @@ static void resched_task(struct task_struct *p)
 	set_tsk_need_resched(p);
 }
 
+static void sched_dl_avg_update(struct rq *rq, u64 dl_delta)
+{
+}
+
 static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 {
 }
diff --git a/kernel/sched_dl.c b/kernel/sched_dl.c
index 26126a6..1bb4308 100644
--- a/kernel/sched_dl.c
+++ b/kernel/sched_dl.c
@@ -509,6 +509,8 @@ static void update_curr_dl(struct rq *rq)
 	curr->se.exec_start = rq->clock;
 	cpuacct_charge(curr, delta_exec);
 
+	sched_dl_avg_update(rq, delta_exec);
+
 	dl_se->runtime -= delta_exec;
 	if (dl_runtime_exceeded(rq, dl_se)) {
 		__dequeue_task_dl(rq, curr, 0);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 54c869c..2afe280 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2276,11 +2276,11 @@ unsigned long scale_rt_power(int cpu)
 
 	total = sched_avg_period() + (rq->clock - rq->age_stamp);
 
-	if (unlikely(total < rq->rt_avg)) {
+	if (unlikely(total < rq->dl_avg + rq->rt_avg)) {
 		/* Ensures that power won't end up being negative */
 		available = 0;
 	} else {
-		available = total - rq->rt_avg;
+		available = total - rq->dl_avg - rq->rt_avg;
 	}
 
 	if (unlikely((s64)total < SCHED_LOAD_SCALE))
-- 
1.7.2.3


-- 
<<This happens because I choose it to happen!>> (Raistlin Majere)
----------------------------------------------------------------------
Dario Faggioli, ReTiS Lab, Scuola Superiore Sant'Anna, Pisa  (Italy)

http://blog.linux.it/raistlin / raistlin@...ga.net /
dario.faggioli@...ber.org

Download attachment "signature.asc" of type "application/pgp-signature" (199 bytes)

Powered by blists - more mailing lists