Message-Id: <20210816154401.23919-1-tao.zhou@linux.dev>
Date:   Mon, 16 Aug 2021 23:44:01 +0800
From:   Tao Zhou <tao.zhou@...ux.dev>
To:     linux-kernel@...r.kernel.org
Cc:     peterz@...radead.org, tglx@...utronix.de, joel@...lfernandes.org,
        chris.hyser@...cle.com, joshdon@...gle.com, mingo@...nel.org,
        vincent.guittot@...aro.org, valentin.schneider@....com,
        mgorman@...e.de, tao.zhou@...ux.dev
Subject: [PATCH] sched/core: A possible optimization of pick_next_task()

Currently, when a new candidate max is found, the code wipes the stale
picks and starts over: it jumps back to the again: label and uses the
new max to loop once more and pick the task on each thread.

This patch instead determines the max of the core in a first pass, and
then uses that max in a single second pass to pick the task on each
sibling thread; a rough sketch of this two-pass shape follows below.

I am not sure this is really an optimization; I just stopped here for
a moment before moving on.

Compile-tested only.
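
As a standalone illustration (not part of the patch), the self-contained
C sketch below captures the intended two-pass shape: one linear pass to
find the core-wide max, then one pass to pick per sibling. The struct
sibling layout, core_pick() and the plain prio/cookie fields here are
made-up stand-ins for the real rq/pick_task() machinery, not the kernel
code itself:

#include <stdio.h>
#include <stddef.h>

struct task {
	const char *name;
	int prio;		/* higher value means higher priority */
	unsigned long cookie;	/* core scheduling cookie */
};

struct sibling {
	struct task *candidate;	/* highest-prio runnable task on this CPU */
	struct task *pick;	/* what the core-wide pick settles on */
};

static int cookie_match(const struct task *a, const struct task *b)
{
	return a->cookie == b->cookie;
}

static void core_pick(struct sibling *sib, int nr)
{
	struct task *max = NULL;
	int i;

	/* Pass 1: a linear max-filter across the core, no restarts. */
	for (i = 0; i < nr; i++) {
		struct task *p = sib[i].candidate;

		if (p && (!max || p->prio > max->prio))
			max = p;
	}

	/* Pass 2: pick per sibling; a cookie mismatch forces idle. */
	for (i = 0; i < nr; i++) {
		struct task *p = sib[i].candidate;

		sib[i].pick = (p && max && cookie_match(max, p)) ? p : NULL;
	}
}

int main(void)
{
	struct task a = { "A", 3, 1 }, b = { "B", 5, 2 }, c = { "C", 1, 2 };
	struct sibling core[3] = { { &a }, { &b }, { &c } };
	int i;

	core_pick(core, 3);
	for (i = 0; i < 3; i++)
		printf("cpu%d -> %s\n", i,
		       core[i].pick ? core[i].pick->name : "idle");
	return 0;
}

Running it, cpu1 keeps B (the max), cpu2 keeps C (same cookie), and
cpu0 is forced idle because A's cookie does not match.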
---
 kernel/sched/core.c | 52 +++++++++++++++++----------------------------
 1 file changed, 20 insertions(+), 32 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 20ffcc044134..bddcd328df96 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5403,7 +5403,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	const struct sched_class *class;
 	const struct cpumask *smt_mask;
 	bool fi_before = false;
-	int i, j, cpu, occ = 0;
+	int i, cpu, occ = 0;
 	bool need_sync;
 
 	if (!sched_core_enabled(rq))
@@ -5508,11 +5508,27 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	 * order.
 	 */
 	for_each_class(class) {
-again:
+		struct rq *rq_i;
+		struct task_struct *p;
+
 		for_each_cpu_wrap(i, smt_mask, cpu) {
-			struct rq *rq_i = cpu_rq(i);
-			struct task_struct *p;
+			rq_i = cpu_rq(i);
+			p = pick_task(rq_i, class, max, fi_before);
+			/*
+			 * If this new candidate is of higher priority than the
+			 * previous and they're incompatible, update max here;
+			 * pick_task() makes sure that p's priority is higher
+			 * than max's if it doesn't match max's cookie.
+			 *
+			 * NOTE: this is a linear max-filter and is thus bounded
+			 * in execution time.
+			 */
+			if (!max || !cookie_match(max, p))
+				max = p;
+		}
 
+		for_each_cpu_wrap(i, smt_mask, cpu) {
+			rq_i = cpu_rq(i);
 			if (rq_i->core_pick)
 				continue;
 
@@ -5536,34 +5552,6 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 					rq->core->core_forceidle_seq++;
 			}
 
-			/*
-			 * If this new candidate is of higher priority than the
-			 * previous; and they're incompatible; we need to wipe
-			 * the slate and start over. pick_task makes sure that
-			 * p's priority is more than max if it doesn't match
-			 * max's cookie.
-			 *
-			 * NOTE: this is a linear max-filter and is thus bounded
-			 * in execution time.
-			 */
-			if (!max || !cookie_match(max, p)) {
-				struct task_struct *old_max = max;
-
-				rq->core->core_cookie = p->core_cookie;
-				max = p;
-
-				if (old_max) {
-					rq->core->core_forceidle = false;
-					for_each_cpu(j, smt_mask) {
-						if (j == i)
-							continue;
-
-						cpu_rq(j)->core_pick = NULL;
-					}
-					occ = 1;
-					goto again;
-				}
-			}
 		}
 	}
 
-- 
2.31.1
