Date:	Tue, 04 Jan 2011 17:02:35 +0100
From:	Dario Faggioli <raistlin@...ux.it>
To:	Peter Zijlstra <a.p.zijlstra@...llo.nl>
Cc:	linux-kernel <linux-kernel@...r.kernel.org>,
	Steven Rostedt <rostedt@...dmis.org>,
	Gregory Haskins <ghaskins@...ell.com>,
	Thomas Gleixner <tglx@...utronix.de>,
	Ingo Molnar <mingo@...e.hu>, Mike Galbraith <efault@....de>,
	Dhaval Giani <dhaval@...is.sssup.it>,
	Fabio Checconi <fabio@...dalf.sssup.it>,
	Darren Hart <darren@...art.com>, oleg <oleg@...hat.com>,
	paulmck <paulmck@...ux.vnet.ibm.com>, pjt@...gle.com,
	bharata@...ux.vnet.ibm.co, lucas.de.marchi@...il.com
Subject: [RFC][PATCH 3/3] sched: beautify access to some common fields.

Fields like on_rq or statistics are accessed quite frequently from
within the CFS implementation, although they are no longer part of
sched_cfs_entity (they live in the generic sched_entity).

This commit provides a couple of helpers (an inline function and a
macro) that make the code dealing with them a bit less cumbersome.
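
(For readers outside the patch context, below is a small, self-contained
user-space sketch of the pattern the helpers implement. It is only an
illustration under assumed names: struct entity, struct cfs_part,
entity_of(), part_on_rq() and part_statistics() are placeholders, not the
kernel's identifiers, and the sketch is not part of the patch.)

/*
 * Sketch of the accessor pattern: a class-specific sub-entity is
 * embedded in a generic entity, and a container_of()-based helper plus
 * a small macro hide the walk back to the fields that stayed in the
 * generic structure.
 */
#include <stddef.h>
#include <stdio.h>

struct stats    { unsigned long wait_count; };
struct cfs_part { unsigned long weight; };
struct entity   {
	int on_rq;
	struct stats statistics;
	struct cfs_part cfs;	/* class-specific part, embedded */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* walk back from the embedded CFS part to its owning entity */
static inline struct entity *entity_of(struct cfs_part *p)
{
	return container_of(p, struct entity, cfs);
}

/* the "beautified" accessors: callers keep passing the CFS part around */
static inline int part_on_rq(struct cfs_part *p)
{
	return entity_of(p)->on_rq;
}

#define part_statistics(p)	(entity_of(p)->statistics)

int main(void)
{
	struct entity e = { .on_rq = 1 };

	part_statistics(&e.cfs).wait_count++;
	printf("on_rq=%d wait_count=%lu\n",
	       part_on_rq(&e.cfs), part_statistics(&e.cfs).wait_count);
	return 0;
}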

Signed-off-by: Dario Faggioli <raistlin@...ux.it>
---
 kernel/sched_fair.c |  111 ++++++++++++++++++++++++++-------------------------
 1 files changed, 56 insertions(+), 55 deletions(-)

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e9b8260..0c3006e 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -107,6 +107,11 @@ static inline struct sched_entity *se_of_cfs_se(struct sched_cfs_entity *cfs_se)
 	return container_of(cfs_se, struct sched_entity, cfs);
 }
 
+static inline int cfs_se_on_rq(struct sched_cfs_entity *cfs_se)
+{
+	return se_of_cfs_se(cfs_se)->on_rq;
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
 /* cpu runqueue to which this cfs_rq is attached */
@@ -455,6 +460,8 @@ static struct sched_cfs_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
  * Scheduling class statistics methods:
  */
 
+#define cfs_se_statistics(cfs_se)	((se_of_cfs_se(cfs_se))->statistics)
+
 #ifdef CONFIG_SCHED_DEBUG
 int sched_proc_update_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp,
@@ -521,8 +528,7 @@ static u64 __sched_period(unsigned long nr_running)
  */
 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se)
 {
-	struct sched_entity *se = se_of_cfs_se(cfs_se);
-	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
+	u64 slice = __sched_period(cfs_rq->nr_running + !cfs_se_on_rq(cfs_se));
 
 	for_each_sched_cfs_entity(cfs_se) {
 		struct load_weight *load;
@@ -531,7 +537,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se)
 		cfs_rq = cfs_rq_of(cfs_se);
 		load = &cfs_rq->load;
 
-		if (unlikely(!se_of_cfs_se(cfs_se)->on_rq)) {
+		if (unlikely(!cfs_se_on_rq(cfs_se))) {
 			lw = cfs_rq->load;
 
 			update_load_add(&lw, cfs_se->load.weight);
@@ -564,13 +570,12 @@ static inline void
 __update_curr(struct cfs_rq *cfs_rq, struct sched_cfs_entity *curr,
 	      unsigned long delta_exec)
 {
-	struct sched_entity *curr_se = se_of_cfs_se(curr);
 	unsigned long delta_exec_weighted;
 
-	schedstat_set(curr_se->statistics.exec_max,
-		      max((u64)delta_exec, curr_se->statistics.exec_max));
+	schedstat_set(cfs_se_statistics(curr).exec_max,
+		      max((u64)delta_exec, cfs_se_statistics(curr).exec_max));
 
-	curr_se->sum_exec_runtime += delta_exec;
+	se_of_cfs_se(curr)->sum_exec_runtime += delta_exec;
 	schedstat_add(cfs_rq, exec_clock, delta_exec);
 	delta_exec_weighted = calc_delta_fair(delta_exec, curr);
 
@@ -616,7 +621,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
 static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se)
 {
-	schedstat_set(se_of_cfs_se(cfs_se)->statistics.wait_start,
+	schedstat_set(cfs_se_statistics(cfs_se).wait_start,
 		      rq_of(cfs_rq)->clock);
 }
 
@@ -637,20 +642,20 @@ update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se)
 static void
 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se)
 {
+	schedstat_set(cfs_se_statistics(cfs_se).wait_max,
+		      max(cfs_se_statistics(cfs_se).wait_max,
+		      rq_of(cfs_rq)->clock - cfs_se_statistics(cfs_se).wait_start));
+	schedstat_set(cfs_se_statistics(cfs_se).wait_count,
+		      cfs_se_statistics(cfs_se).wait_count+1);
+	schedstat_set(cfs_se_statistics(cfs_se).wait_sum,
+		      cfs_se_statistics(cfs_se).wait_sum +
+		      rq_of(cfs_rq)->clock - cfs_se_statistics(cfs_se).wait_start);
 #ifdef CONFIG_SCHEDSTATS
-	/* #ifdef-ing like this suppresses a gcc warning */
-	struct sched_entity *se = se_of_cfs_se(cfs_se);
-
-	schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
-		      rq_of(cfs_rq)->clock - se->statistics.wait_start));
-	schedstat_set(se->statistics.wait_count, se->statistics.wait_count+1);
-	schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
-		      rq_of(cfs_rq)->clock - se->statistics.wait_start);
 	if (cfs_entity_is_task(cfs_se)) {
 		trace_sched_stat_wait(cfs_task_of(cfs_se), rq_of(cfs_rq)->clock -
-				      se->statistics.wait_start);
+				      cfs_se_statistics(cfs_se).wait_start);
 	}
-	schedstat_set(se->statistics.wait_start, 0);
+	schedstat_set(cfs_se_statistics(cfs_se).wait_start, 0);
 #endif
 }
 
@@ -787,9 +792,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq,
 			    struct sched_cfs_entity *cfs_se,
 			    unsigned long weight)
 {
-	struct sched_entity *se = se_of_cfs_se(cfs_se);
-
-	if (se->on_rq) {
+	if (cfs_se_on_rq(cfs_se)) {
 		/* commit outstanding execution time */
 		if (cfs_rq->curr == cfs_se)
 			update_curr(cfs_rq);
@@ -798,7 +801,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq,
 
 	update_load_set(&cfs_se->load, weight);
 
-	if (se->on_rq)
+	if (cfs_se_on_rq(cfs_se))
 		account_entity_enqueue(cfs_rq, cfs_se);
 }
 
@@ -860,44 +863,45 @@ enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se)
 {
 #ifdef CONFIG_SCHEDSTATS
 	struct task_struct *tsk = NULL;
-	struct sched_entity *se = se_of_cfs_se(cfs_se);
 
 	if (cfs_entity_is_task(cfs_se))
 		tsk = cfs_task_of(cfs_se);
 
-	if (se->statistics.sleep_start) {
-		u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;
+	if (cfs_se_statistics(cfs_se).sleep_start) {
+		u64 delta = rq_of(cfs_rq)->clock -
+			    cfs_se_statistics(cfs_se).sleep_start;
 
 		if ((s64)delta < 0)
 			delta = 0;
 
-		if (unlikely(delta > se->statistics.sleep_max))
-			se->statistics.sleep_max = delta;
+		if (unlikely(delta > cfs_se_statistics(cfs_se).sleep_max))
+			cfs_se_statistics(cfs_se).sleep_max = delta;
 
-		se->statistics.sleep_start = 0;
-		se->statistics.sum_sleep_runtime += delta;
+		cfs_se_statistics(cfs_se).sleep_start = 0;
+		cfs_se_statistics(cfs_se).sum_sleep_runtime += delta;
 
 		if (tsk) {
 			account_scheduler_latency(tsk, delta >> 10, 1);
 			trace_sched_stat_sleep(tsk, delta);
 		}
 	}
-	if (se->statistics.block_start) {
-		u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;
+	if (cfs_se_statistics(cfs_se).block_start) {
+		u64 delta = rq_of(cfs_rq)->clock -
+			    cfs_se_statistics(cfs_se).block_start;
 
 		if ((s64)delta < 0)
 			delta = 0;
 
-		if (unlikely(delta > se->statistics.block_max))
-			se->statistics.block_max = delta;
+		if (unlikely(delta > cfs_se_statistics(cfs_se).block_max))
+			cfs_se_statistics(cfs_se).block_max = delta;
 
-		se->statistics.block_start = 0;
-		se->statistics.sum_sleep_runtime += delta;
+		cfs_se_statistics(cfs_se).block_start = 0;
+		cfs_se_statistics(cfs_se).sum_sleep_runtime += delta;
 
 		if (tsk) {
 			if (tsk->in_iowait) {
-				se->statistics.iowait_sum += delta;
-				se->statistics.iowait_count++;
+				cfs_se_statistics(cfs_se).iowait_sum += delta;
+				cfs_se_statistics(cfs_se).iowait_count++;
 				trace_sched_stat_iowait(tsk, delta);
 			}
 
@@ -1031,12 +1035,11 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se,
 #ifdef CONFIG_SCHEDSTATS
 		if (cfs_entity_is_task(cfs_se)) {
 			struct task_struct *tsk = cfs_task_of(cfs_se);
-			struct sched_entity *se = se_of_cfs_se(cfs_se);
 
 			if (tsk->state & TASK_INTERRUPTIBLE)
-				se->statistics.sleep_start = rq_of(cfs_rq)->clock;
+				cfs_se_statistics(cfs_se).sleep_start = rq_of(cfs_rq)->clock;
 			if (tsk->state & TASK_UNINTERRUPTIBLE)
-				se->statistics.block_start = rq_of(cfs_rq)->clock;
+				cfs_se_statistics(cfs_se).block_start = rq_of(cfs_rq)->clock;
 		}
 #endif
 	}
@@ -1104,10 +1107,8 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_cfs_entity *curr)
 static void
 set_next_entity(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se)
 {
-	struct sched_entity *se = se_of_cfs_se(cfs_se);
-
 	/* 'current' is not kept within the tree. */
-	if (se->on_rq) {
+	if (cfs_se_on_rq(cfs_se)) {
 		/*
 		 * Any task has to be enqueued before it get to execute on
 		 * a CPU. So account for the time it spent waiting on the
@@ -1126,11 +1127,13 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_cfs_entity *cfs_se)
 	 * when there are only lesser-weight tasks around):
 	 */
 	if (rq_of(cfs_rq)->load.weight >= 2*cfs_se->load.weight) {
-		se->statistics.slice_max = max(se->statistics.slice_max,
-			se->sum_exec_runtime - se->prev_sum_exec_runtime);
+		cfs_se_statistics(cfs_se).slice_max = max(cfs_se_statistics(cfs_se).slice_max,
+			se_of_cfs_se(cfs_se)->sum_exec_runtime -
+			se_of_cfs_se(cfs_se)->prev_sum_exec_runtime);
 	}
 #endif
-	se->prev_sum_exec_runtime = se->sum_exec_runtime;
+	se_of_cfs_se(cfs_se)->prev_sum_exec_runtime =
+				se_of_cfs_se(cfs_se)->sum_exec_runtime;
 }
 
 static int wakeup_preempt_entity(struct sched_cfs_entity *curr,
@@ -1158,17 +1161,15 @@ static struct sched_cfs_entity *pick_next_entity(struct cfs_rq *cfs_rq)
 static void
 put_prev_entity(struct cfs_rq *cfs_rq, struct sched_cfs_entity *prev)
 {
-	struct sched_entity *prev_se = se_of_cfs_se(prev);
-
 	/*
 	 * If still on the runqueue then deactivate_task()
 	 * was not called and update_curr() has to be done:
 	 */
-	if (prev_se->on_rq)
+	if (cfs_se_on_rq(prev))
 		update_curr(cfs_rq);
 
 	check_spread(cfs_rq, prev);
-	if (prev_se->on_rq) {
+	if (cfs_se_on_rq(prev)) {
 		update_stats_wait_start(cfs_rq, prev);
 		/* Put 'current' back into the tree. */
 		__enqueue_entity(cfs_rq, prev);
@@ -1279,11 +1280,11 @@ static inline void hrtick_update(struct rq *rq)
 static void
 enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
-	struct cfs_rq *cfs_rq;
 	struct sched_cfs_entity *cfs_se = &p->se.cfs;
+	struct cfs_rq *cfs_rq;
 
 	for_each_sched_cfs_entity(cfs_se) {
-		if (se_of_cfs_se(cfs_se)->on_rq)
+		if (cfs_se_on_rq(cfs_se))
 			break;
 		cfs_rq = cfs_rq_of(cfs_se);
 		enqueue_entity(cfs_rq, cfs_se, flags);
@@ -1505,7 +1506,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	if (sync && balanced)
 		return 1;
 
-	schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
+	schedstat_inc(p, cfs_se_statistics(cfs_se).nr_wakeups_affine_attempts);
 	tl_per_task = cpu_avg_load_per_task(this_cpu);
 
 	if (balanced ||
@@ -1517,7 +1518,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 		 * there is no bad imbalance.
 		 */
 		schedstat_inc(sd, ttwu_move_affine);
-		schedstat_inc(p, se.statistics.nr_wakeups_affine);
+		schedstat_inc(p, cfs_se_statistics(cfs_se).nr_wakeups_affine);
 
 		return 1;
 	}
@@ -1902,7 +1903,7 @@ preempt:
 	 * Also, during early boot the idle thread is in the fair class,
 	 * for obvious reasons its a bad idea to schedule back to it.
 	 */
-	if (unlikely(!se_of_cfs_se(cfs_se)->on_rq || curr == rq->idle))
+	if (unlikely(!cfs_se_on_rq(cfs_se) || curr == rq->idle))
 		return;
 
 	if (sched_feat(LAST_BUDDY) && scale && cfs_entity_is_task(cfs_se))
-- 
1.7.2.3

-- 
<<This happens because I choose it to happen!>> (Raistlin Majere)
----------------------------------------------------------------------
Dario Faggioli, ReTiS Lab, Scuola Superiore Sant'Anna, Pisa  (Italy)

http://retis.sssup.it/people/faggioli -- dario.faggioli@...ber.org
