lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20230328110354.712296502@infradead.org>
Date:   Tue, 28 Mar 2023 11:26:38 +0200
From:   Peter Zijlstra <peterz@...radead.org>
To:     mingo@...nel.org, vincent.guittot@...aro.org
Cc:     linux-kernel@...r.kernel.org, peterz@...radead.org,
        juri.lelli@...hat.com, dietmar.eggemann@....com,
        rostedt@...dmis.org, bsegall@...gle.com, mgorman@...e.de,
        bristot@...hat.com, corbet@....net, qyousef@...alina.io,
        chris.hyser@...cle.com, patrick.bellasi@...bug.net, pjt@...gle.com,
        pavel@....cz, qperret@...gle.com, tim.c.chen@...ux.intel.com,
        joshdon@...gle.com, timj@....org, kprateek.nayak@....com,
        yu.c.chen@...el.com, youssefesmat@...omium.org,
        joel@...lfernandes.org, efault@....de
Subject: [PATCH 16/17] [RFC] sched/eevdf: Minimal vavg option

Alternative means of tracking min_vruntime to minimize the deltas
going into avg_vruntime -- note that because vavg moves backwards this
is all sorts of tricky.

Also more expensive because of the extra divisions... I have not found
this convincing.

Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
 kernel/sched/fair.c     |   51 ++++++++++++++++++++++++++++--------------------
 kernel/sched/features.h |    2 +
 2 files changed, 32 insertions(+), 21 deletions(-)

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -732,28 +732,37 @@ static u64 __update_min_vruntime(struct
 
 static void update_min_vruntime(struct cfs_rq *cfs_rq)
 {
-	struct sched_entity *se = __pick_first_entity(cfs_rq);
-	struct sched_entity *curr = cfs_rq->curr;
-
-	u64 vruntime = cfs_rq->min_vruntime;
-
-	if (curr) {
-		if (curr->on_rq)
-			vruntime = curr->vruntime;
-		else
-			curr = NULL;
+	if (sched_feat(MINIMAL_VA)) {
+		u64 vruntime = avg_vruntime(cfs_rq);
+		s64 delta = (s64)(vruntime - cfs_rq->min_vruntime);
+
+		avg_vruntime_update(cfs_rq, delta);
+
+		u64_u32_store(cfs_rq->min_vruntime, vruntime);
+	} else {
+		struct sched_entity *se = __pick_first_entity(cfs_rq);
+		struct sched_entity *curr = cfs_rq->curr;
+
+		u64 vruntime = cfs_rq->min_vruntime;
+
+		if (curr) {
+			if (curr->on_rq)
+				vruntime = curr->vruntime;
+			else
+				curr = NULL;
+		}
+
+		if (se) {
+			if (!curr)
+				vruntime = se->vruntime;
+			else
+				vruntime = min_vruntime(vruntime, se->vruntime);
+		}
+
+		/* ensure we never gain time by being placed backwards. */
+		u64_u32_store(cfs_rq->min_vruntime,
+				__update_min_vruntime(cfs_rq, vruntime));
 	}
-
-	if (se) {
-		if (!curr)
-			vruntime = se->vruntime;
-		else
-			vruntime = min_vruntime(vruntime, se->vruntime);
-	}
-
-	/* ensure we never gain time by being placed backwards. */
-	u64_u32_store(cfs_rq->min_vruntime,
-		      __update_min_vruntime(cfs_rq, vruntime));
 }
 
 static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -9,6 +9,8 @@ SCHED_FEAT(PLACE_FUDGE, true)
 SCHED_FEAT(PLACE_DEADLINE_INITIAL, true)
 SCHED_FEAT(PLACE_BONUS, false)
 
+SCHED_FEAT(MINIMAL_VA, false)
+
 /*
  * Prefer to schedule the task we woke last (assuming it failed
  * wakeup-preemption), since its likely going to consume data we


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ