Date:	Wed, 19 Aug 2015 15:47:16 +0900
From:	byungchul.park@....com
To:	mingo@...nel.org, peterz@...radead.org
Cc:	linux-kernel@...r.kernel.org, yuyang.du@...el.com,
	Byungchul Park <byungchul.park@....com>
Subject:	[PATCH v3 5/5] sched: add two functions for attaching/detaching a task to/from a cfs_rq

From: Byungchul Park <byungchul.park@....com>

This patch introduces two functions for adjusting a task's vruntime
and the target cfs_rq's average load, so that redundant code doing
the same things can be simplified. It also improves readability.

switched_from_fair(), switched_to_fair() and task_move_group_fair()
can use the introduced functions and become simpler.
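
For reviewers: "normalized" below means the entity's vruntime is kept
relative to its cfs_rq's min_vruntime, so the value stays meaningful
when the task changes cfs_rqs. A rough sketch of the invariant the two
helpers maintain (illustrative only, not part of the patch; old_cfs_rq
and new_cfs_rq are placeholder names):

	/* detach: make vruntime relative to the old cfs_rq */
	se->vruntime -= old_cfs_rq->min_vruntime;

	/* ... the task is moved to another cfs_rq ... */

	/* attach: make vruntime absolute again on the new cfs_rq */
	se->vruntime += new_cfs_rq->min_vruntime;

Skipping this adjustment in the already-normalized cases listed in the
comments avoids a boost or penalty from the min_vruntime delta between
the two cfs_rqs.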

Signed-off-by: Byungchul Park <byungchul.park@....com>
---
 kernel/sched/fair.c |  143 +++++++++++++++++++++++----------------------------
 1 file changed, 63 insertions(+), 80 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3419f6c..242f0d3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7915,21 +7915,29 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
 		check_preempt_curr(rq, p, 0);
 }
 
-static void switched_from_fair(struct rq *rq, struct task_struct *p)
+static void detach_task_cfs_rq(struct task_struct *p, int queued)
 {
 	struct sched_entity *se = &p->se;
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
 	/*
-	 * Ensure the task's vruntime is normalized, so that when it's
-	 * switched back to the fair class the enqueue_entity(.flags=0) will
-	 * do the right thing.
+	 * Ensure the task's vruntime is normalized, so that after attaching
+	 * it back to a cfs_rq, enqueue_entity() will do the right thing.
 	 *
 	 * If it's queued, then the dequeue_entity(.flags=0) will already
 	 * have normalized the vruntime, if it's !queued, then only when
-	 * the task is sleeping will it still have non-normalized vruntime.
+	 * the task is sleeping does it still have a non-normalized vruntime.
+	 *
+	 * When !queued, the task's vruntime has usually NOT been normalized.
+	 * But there are some cases where it has already been normalized:
+	 *
+	 * - Moving a forked child which is waiting to be woken up by
+	 *   wake_up_new_task().
+	 * - Moving a task which has been woken up by try_to_wake_up() and
+	 *   is waiting to actually be woken up by sched_ttwu_pending().
 	 */
-	if (!task_on_rq_queued(p) && p->state != TASK_RUNNING) {
+	if (!queued && p->state != TASK_RUNNING &&
+	    se->sum_exec_runtime && p->state != TASK_WAKING) {
 		/*
 		 * Fix up our vruntime so that the current sleep doesn't
 		 * cause 'unlimited' sleep bonus.
@@ -7944,12 +7952,10 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
 #endif
 }
 
-/*
- * We switched to the sched_fair class.
- */
-static void switched_to_fair(struct rq *rq, struct task_struct *p)
+static void attach_task_cfs_rq(struct task_struct *p, int queued)
 {
 	struct sched_entity *se = &p->se;
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/*
@@ -7961,34 +7967,57 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
 
 #ifdef CONFIG_SMP
 	/* synchronize task with its cfs_rq */
-	attach_entity_load_avg(cfs_rq_of(&p->se), &p->se);
+	attach_entity_load_avg(cfs_rq, se);
 #endif
 
-	if (!task_on_rq_queued(p)) {
+	/*
+	 * Ensure the task has a non-normalized vruntime when attaching it
+	 * to a cfs_rq, so that enqueue_entity() at wake-up time will do
+	 * the right thing.
+	 *
+	 * If it's queued, then the enqueue_entity(.flags=0) gives the task
+	 * a non-normalized vruntime; if it's !queued, then it still has
+	 * a normalized vruntime.
+	 *
+	 * When !queued, a task should usually have a non-normalized vruntime
+	 * after being attached to a cfs_rq. But there are some cases where
+	 * we should keep it normalized:
+	 *
+	 * - Moving a forked child which is waiting to be woken up by
+	 *   wake_up_new_task().
+	 * - Moving a task which has been woken up by try_to_wake_up() and
+	 *   is waiting to actually be woken up by sched_ttwu_pending().
+	 */
+	if (!queued && p->state != TASK_RUNNING &&
+	    se->sum_exec_runtime && p->state != TASK_WAKING)
+		se->vruntime += cfs_rq->min_vruntime;
+}
+
+static void switched_from_fair(struct rq *rq, struct task_struct *p)
+{
+	detach_task_cfs_rq(p, task_on_rq_queued(p));
+}
 
+/*
+ * We switched to the sched_fair class.
+ */
+static void switched_to_fair(struct rq *rq, struct task_struct *p)
+{
+	int queued = task_on_rq_queued(p);
+
+	attach_task_cfs_rq(p, queued);
+
+	if (queued) {
 		/*
-		 * Ensure the task has a non-normalized vruntime when it is switched
-		 * back to the fair class with !queued, so that enqueue_entity() at
-		 * wake-up time will do the right thing.
-		 *
-		 * If it's queued, then the enqueue_entity(.flags=0) makes the task
-		 * has non-normalized vruntime, if it's !queued, then it still has
-		 * normalized vruntime.
+		 * We were most likely switched from sched_rt, so
+		 * kick off the schedule if running, otherwise just see
+		 * if we can still preempt the current task.
 		 */
-		if (p->state != TASK_RUNNING)
-			se->vruntime += cfs_rq_of(se)->min_vruntime;
-		return;
+		if (rq->curr == p)
+			resched_curr(rq);
+		else
+			check_preempt_curr(rq, p, 0);
 	}
-
-	/*
-	 * We were most likely switched from sched_rt, so
-	 * kick off the schedule if running, otherwise just see
-	 * if we can still preempt the current task.
-	 */
-	if (rq->curr == p)
-		resched_curr(rq);
-	else
-		check_preempt_curr(rq, p, 0);
 }
 
 /* Account for a task changing its policy or group.
@@ -8025,58 +8054,12 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static void task_move_group_fair(struct task_struct *p, int queued)
 {
-	struct sched_entity *se = &p->se;
-	struct cfs_rq *cfs_rq;
-
-	/*
-	 * If the task was not on the rq at the time of this cgroup movement
-	 * it must have been asleep, sleeping tasks keep their ->vruntime
-	 * absolute on their old rq until wakeup (needed for the fair sleeper
-	 * bonus in place_entity()).
-	 *
-	 * If it was on the rq, we've just 'preempted' it, which does convert
-	 * ->vruntime to a relative base.
-	 *
-	 * Make sure both cases convert their relative position when migrating
-	 * to another cgroup's rq. This does somewhat interfere with the
-	 * fair sleeper stuff for the first placement, but who cares.
-	 */
-	/*
-	 * When !queued, vruntime of the task has usually NOT been normalized.
-	 * But there are some cases where it has already been normalized:
-	 *
-	 * - Moving a forked child which is waiting for being woken up by
-	 *   wake_up_new_task().
-	 * - Moving a task which has been woken up by try_to_wake_up() and
-	 *   waiting for actually being woken up by sched_ttwu_pending().
-	 *
-	 * To prevent boost or penalty in the new cfs_rq caused by delta
-	 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
-	 */
-	if (!queued && (!se->sum_exec_runtime || p->state == TASK_WAKING))
-		queued = 1;
-
-	cfs_rq = cfs_rq_of(se);
-	if (!queued)
-		se->vruntime -= cfs_rq->min_vruntime;
-
-#ifdef CONFIG_SMP
-	/* synchronize task with its prev cfs_rq */
-	detach_entity_load_avg(cfs_rq, se);
-#endif
+	detach_task_cfs_rq(p, queued);
 	set_task_rq(p, task_cpu(p));
 
 	/* tell se's cfs_rq has been changed */
 	p->se.avg.last_update_time = 0;
-
-	se->depth = se->parent ? se->parent->depth + 1 : 0;
-	cfs_rq = cfs_rq_of(se);
-	if (!queued)
-		se->vruntime += cfs_rq->min_vruntime;
-#ifdef CONFIG_SMP
-	/* Virtually synchronize task with its new cfs_rq */
-	attach_entity_load_avg(cfs_rq, se);
-#endif
+	attach_task_cfs_rq(p, queued);
 }
 
 void free_fair_sched_group(struct task_group *tg)
-- 
1.7.9.5
