Message-Id: <1353157457-3649-2-git-send-email-alex.shi@intel.com>
Date:	Sat, 17 Nov 2012 21:04:13 +0800
From:	Alex Shi <alex.shi@...el.com>
To:	mingo@...hat.com, peterz@...radead.org, pjt@...gle.com,
	preeti@...ux.vnet.ibm.com, vincent.guittot@...aro.org
Cc:	linux-kernel@...r.kernel.org
Subject: [RFC PATCH 1/5] sched: get rq runnable load average for load balance

In load balancing, the rq load weight is at the core of the balancing
decision. Now it's time to bring PJT's runnable load tracking into
load balancing.

Since we already have the rq runnable_avg_sum and the rq load weight,
the rq runnable load average is easy to compute:

	runnable_load(rq) = runnable_avg(rq) * weight(rq)

where runnable_avg(rq) is runnable_avg_sum / (runnable_avg_period + 1).
We then reuse rq->avg.load_avg_contrib to store the value.
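
For illustration, here is a minimal userspace sketch of the same
fixed-point arithmetic (standalone; the sample numbers and the no-op
scale_load()/scale_load_down() stand-ins are made up for the example,
while in the kernel the inputs come from the per-rq sched_avg and
load_weight):

	#include <stdint.h>
	#include <stdio.h>

	/* stand-ins for the kernel's load-scaling helpers; they are
	 * no-ops on configs without increased load resolution */
	#define scale_load_down(w)	(w)
	#define scale_load(w)		(w)

	int main(void)
	{
		/* hypothetical per-rq tracking values */
		uint32_t runnable_avg_sum = 40000;    /* decayed runnable time */
		uint32_t runnable_avg_period = 47000; /* decayed total period */
		unsigned long weight = 2048;          /* e.g. two nice-0 tasks */

		/* runnable_load(rq) = runnable_avg(rq) * weight(rq);
		 * the +1 keeps the divisor nonzero, as in the patch */
		uint32_t contrib = runnable_avg_sum * scale_load_down(weight);
		contrib /= (runnable_avg_period + 1);

		/* ~85% runnable of weight 2048 -> prints 1742 */
		printf("load_avg_contrib = %lu\n",
		       (unsigned long)scale_load(contrib));
		return 0;
	}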

Signed-off-by: Alex Shi <alex.shi@...el.com>
---
 kernel/sched/debug.c |    1 +
 kernel/sched/fair.c  |   19 +++++++++++++++----
 2 files changed, 16 insertions(+), 4 deletions(-)

diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 2cd3c1b..1cd5639 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -71,6 +71,7 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 		struct sched_avg *avg = &cpu_rq(cpu)->avg;
 		P(avg->runnable_avg_sum);
 		P(avg->runnable_avg_period);
+		P(avg->load_avg_contrib);
 		return;
 	}
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a624d3b..bc60e43 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1439,14 +1439,25 @@ static inline void __update_tg_runnable_avg(struct sched_avg *sa,
 static inline void __update_group_entity_contrib(struct sched_entity *se) {}
 #endif
 
-static inline void __update_task_entity_contrib(struct sched_entity *se)
+static inline void __update_load_avg_contrib(struct sched_avg *sa,
+						struct load_weight *load)
 {
 	u32 contrib;
 
 	/* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
-	contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
-	contrib /= (se->avg.runnable_avg_period + 1);
-	se->avg.load_avg_contrib = scale_load(contrib);
+	contrib = sa->runnable_avg_sum * scale_load_down(load->weight);
+	contrib /= (sa->runnable_avg_period + 1);
+	sa->load_avg_contrib = scale_load(contrib);
+}
+
+static inline void __update_task_entity_contrib(struct sched_entity *se)
+{
+	__update_load_avg_contrib(&se->avg, &se->load);
+}
+
+static inline void __update_rq_load_contrib(struct rq *rq)
+{
+	__update_load_avg_contrib(&rq->avg, &rq->load);
 }
 
 /* Compute the current contribution to load_avg by se, return any delta */
@@ -1539,6 +1550,7 @@ static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
 {
 	__update_entity_runnable_avg(rq->clock_task, &rq->avg, runnable);
 	__update_tg_runnable_avg(&rq->avg, &rq->cfs);
+	__update_rq_load_contrib(rq);
 }
 
 /* Add the load generated by se into cfs_rq's child load-average */
-- 
1.7.5.4
