lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20250702121159.050144163@infradead.org>
Date: Wed, 02 Jul 2025 13:49:31 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: mingo@...hat.com,
 juri.lelli@...hat.com,
 vincent.guittot@...aro.org,
 dietmar.eggemann@....com,
 rostedt@...dmis.org,
 bsegall@...gle.com,
 mgorman@...e.de,
 vschneid@...hat.com,
 clm@...a.com
Cc: linux-kernel@...r.kernel.org,
 peterz@...radead.org
Subject: [PATCH v2 07/12] psi: Split psi_ttwu_dequeue()

Currently psi_ttwu_dequeue() is called while holding p->pi_lock and
takes rq->lock. Split the function in preparation for calling
ttwu_do_migrate() while already holding rq->lock.

Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
 kernel/sched/core.c  |   18 ++++++++++++++----
 kernel/sched/stats.h |   24 +++++++++++++-----------
 2 files changed, 27 insertions(+), 15 deletions(-)

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3774,17 +3774,27 @@ static int ttwu_runnable(struct task_str
 	return 1;
 }
 
-static inline bool ttwu_do_migrate(struct task_struct *p, int cpu)
+static inline bool ttwu_do_migrate(struct rq *rq, struct task_struct *p, int cpu)
 {
+	struct rq *p_rq = rq ? : task_rq(p);
+
 	if (task_cpu(p) == cpu)
 		return false;
 
 	if (p->in_iowait) {
 		delayacct_blkio_end(p);
-		atomic_dec(&task_rq(p)->nr_iowait);
+		atomic_dec(&p_rq->nr_iowait);
 	}
 
-	psi_ttwu_dequeue(p);
+	if (psi_ttwu_need_dequeue(p)) {
+		if (rq) {
+			lockdep_assert(task_rq(p) == rq);
+			__psi_ttwu_dequeue(p);
+		} else {
+			guard(__task_rq_lock)(p);
+			__psi_ttwu_dequeue(p);
+		}
+	}
+
 	set_task_cpu(p, cpu);
 	return true;
 }
@@ -4283,7 +4293,7 @@ int try_to_wake_up(struct task_struct *p
 		 * their previous state and preserve Program Order.
 		 */
 		smp_cond_load_acquire(&p->on_cpu, !VAL);
-		if (ttwu_do_migrate(p, cpu))
+		if (ttwu_do_migrate(NULL, p, cpu))
 			wake_flags |= WF_MIGRATED;
 
 		ttwu_queue(p, cpu, wake_flags);
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -191,23 +191,24 @@ static inline void psi_dequeue(struct ta
 	psi_task_change(p, p->psi_flags, 0);
 }
 
-static inline void psi_ttwu_dequeue(struct task_struct *p)
+static inline bool psi_ttwu_need_dequeue(struct task_struct *p)
 {
 	if (static_branch_likely(&psi_disabled))
-		return;
+		return false;
 	/*
 	 * Is the task being migrated during a wakeup? Make sure to
 	 * deregister its sleep-persistent psi states from the old
 	 * queue, and let psi_enqueue() know it has to requeue.
 	 */
-	if (unlikely(p->psi_flags)) {
-		struct rq_flags rf;
-		struct rq *rq;
-
-		rq = __task_rq_lock(p, &rf);
-		psi_task_change(p, p->psi_flags, 0);
-		__task_rq_unlock(rq, &rf);
-	}
+	if (likely(!p->psi_flags))
+		return false;
+
+	return true;
+}
+
+static inline void __psi_ttwu_dequeue(struct task_struct *p)
+{
+	psi_task_change(p, p->psi_flags, 0);
 }
 
 static inline void psi_sched_switch(struct task_struct *prev,
@@ -223,7 +224,8 @@ static inline void psi_sched_switch(stru
 #else /* !CONFIG_PSI: */
 static inline void psi_enqueue(struct task_struct *p, bool migrate) {}
 static inline void psi_dequeue(struct task_struct *p, bool migrate) {}
-static inline void psi_ttwu_dequeue(struct task_struct *p) {}
+static inline bool psi_ttwu_need_dequeue(struct task_struct *p) { return false; }
+static inline void __psi_ttwu_dequeue(struct task_struct *p) {}
 static inline void psi_sched_switch(struct task_struct *prev,
 				    struct task_struct *next,
 				    bool sleep) {}



Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ