Message-Id: <20180907214047.26914-30-jschoenh@amazon.de>
Date:   Fri,  7 Sep 2018 23:40:16 +0200
From:   Jan H. Schönherr <jschoenh@...zon.de>
To:     Ingo Molnar <mingo@...hat.com>,
        Peter Zijlstra <peterz@...radead.org>
Cc:     Jan H. Schönherr <jschoenh@...zon.de>,
        linux-kernel@...r.kernel.org
Subject: [RFC 29/60] cosched: Adjust code reflecting on the total number of CFS tasks on a CPU

There are a few places that make decisions based on the total number
of CFS tasks on a certain CPU. With coscheduling, the inspected value
rq->cfs.h_nr_running no longer contains all of these tasks, as some are
accounted on higher levels of the hierarchy instead. This leads to
incorrect conclusions, because the system appears more idle than it
actually is.

Adjust these code paths to derive the same value in an alternative
way: take the total number of tasks on the runqueue and subtract all
running tasks of other scheduling classes. What remains are the CFS
tasks on that CPU.
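
For illustration (hypothetical numbers, not taken from a real system):
on a CPU with rq->nr_running == 6, of which 2 tasks are deadline, 1 is
real time, and the stop task is queued, the new helper yields
6 - (2 + 1) - 1 = 2 runnable CFS tasks, independent of how many of them
are currently accounted in rq->cfs.h_nr_running.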

Signed-off-by: Jan H. Schönherr <jschoenh@...zon.de>
---
 kernel/sched/core.c  |  5 ++---
 kernel/sched/fair.c  | 11 +++++------
 kernel/sched/sched.h | 21 +++++++++++++++++++++
 3 files changed, 28 insertions(+), 9 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5350cab7ac4a..337bae6fa836 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3326,12 +3326,12 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	/*
 	 * Optimization: we know that if all tasks are in the fair class we can
 	 * call that function directly, but only if the @prev task wasn't of a
-	 * higher scheduling class, because otherwise those loose the
+	 * higher scheduling class, because otherwise those lose the
 	 * opportunity to pull in more work from other CPUs.
 	 */
 	if (likely((prev->sched_class == &idle_sched_class ||
 		    prev->sched_class == &fair_sched_class) &&
-		   rq->nr_running == rq->cfs.h_nr_running)) {
+		   rq->nr_running == nr_cfs_tasks(rq))) {
 
 		p = fair_sched_class.pick_next_task(rq, prev, rf);
 		if (unlikely(p == RETRY_TASK))
@@ -3343,7 +3343,6 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 		return p;
 	}
-
 again:
 	for_each_class(class) {
 		p = class->pick_next_task(rq, prev, rf);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9cbdd027d449..30e5ff30f442 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4460,7 +4460,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 		add_nr_running(rq, task_delta);
 
 	/* Determine whether we need to wake up potentially idle CPU: */
-	if (rq->curr == rq->idle && rq->cfs.nr_running)
+	if (rq->curr == rq->idle && nr_cfs_tasks(rq))
 		resched_curr(rq);
 }
 
@@ -4937,7 +4937,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 
 	SCHED_WARN_ON(task_rq(p) != rq);
 
-	if (rq->cfs.h_nr_running > 1) {
+	if (nr_cfs_tasks(rq) > 1) {
 		u64 slice = sched_slice(cfs_rq, se);
 		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
 		s64 delta = slice - ran;
@@ -9067,8 +9067,7 @@ static void nohz_balancer_kick(struct rq *rq)
 
 	sd = rcu_dereference(rq->sd);
 	if (sd) {
-		if ((rq->cfs.h_nr_running >= 1) &&
-				check_cpu_capacity(rq, sd)) {
+		if ((nr_cfs_tasks(rq) >= 1) && check_cpu_capacity(rq, sd)) {
 			flags = NOHZ_KICK_MASK;
 			goto unlock;
 		}
@@ -9479,7 +9478,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
 	 * have been enqueued in the meantime. Since we're not going idle,
 	 * pretend we pulled a task.
 	 */
-	if (this_rq->cfs.h_nr_running && !pulled_task)
+	if (nr_cfs_tasks(this_rq) && !pulled_task)
 		pulled_task = 1;
 
 	/* Move the next balance forward */
@@ -9487,7 +9486,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
 		this_rq->next_balance = next_balance;
 
 	/* Is there a task of a high priority class? */
-	if (this_rq->nr_running != this_rq->cfs.h_nr_running)
+	if (this_rq->nr_running != nr_cfs_tasks(this_rq))
 		pulled_task = -1;
 
 	if (pulled_task)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 5e2d231b1dbf..594eb9489f3d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1691,6 +1691,27 @@ static inline int task_on_rq_migrating(struct task_struct *p)
 	return p->on_rq == TASK_ON_RQ_MIGRATING;
 }
 
+#ifdef CONFIG_COSCHEDULING
+static inline unsigned int nr_cfs_tasks(struct rq *rq)
+{
+	unsigned int total = rq->nr_running;
+
+	/* Deadline and real time tasks */
+	total -= rq->dl.dl_nr_running + rq->rt.rt_nr_running;
+
+	/* Stop task */
+	if (rq->stop && task_on_rq_queued(rq->stop))
+		total--;
+
+	return total;
+}
+#else
+static inline unsigned int nr_cfs_tasks(struct rq *rq)
+{
+	return rq->cfs.h_nr_running;
+}
+#endif
+
 /*
  * wake flags
  */
-- 
2.9.3.1.gcba166c.dirty
