lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite for Android: free password hash cracker in your pocket
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <ced96acd54644325b77c2d8f9fcda658@honor.com>
Date: Sat, 28 Jun 2025 06:50:32 +0000
From: liuwenfang <liuwenfang@...or.com>
To: 'Tejun Heo' <tj@...nel.org>
CC: 'David Vernet' <void@...ifault.com>, 'Andrea Righi' <arighi@...dia.com>,
	'Changwoo Min' <changwoo@...lia.com>, 'Ingo Molnar' <mingo@...hat.com>,
	'Peter Zijlstra' <peterz@...radead.org>, 'Juri Lelli'
	<juri.lelli@...hat.com>, 'Vincent Guittot' <vincent.guittot@...aro.org>,
	'Dietmar Eggemann' <dietmar.eggemann@....com>, 'Steven Rostedt'
	<rostedt@...dmis.org>, 'Ben Segall' <bsegall@...gle.com>, 'Mel Gorman'
	<mgorman@...e.de>, 'Valentin Schneider' <vschneid@...hat.com>,
	"'linux-kernel@...r.kernel.org'" <linux-kernel@...r.kernel.org>
Subject: [PATCH v2 1/2] sched_ext: Fix cpu_released while RT task and SCX task
 are scheduled concurrently

Suppose RT task (RT1) is running on CPU0 and RT task (RT2) is awakened on CPU1.
When RT1 goes to sleep, SCX task (SCX1) will be dispatched to CPU0, and RT2 will
be placed on CPU0:

CPU0(schedule)                                     CPU1(try_to_wake_up)
set_current_state(TASK_INTERRUPTIBLE)              try_to_wake_up # RT2
__schedule                                           select_task_rq # CPU0 is selected
LOCK rq(0)->lock # lock CPU0 rq                        ttwu_queue
  deactivate_task # RT1                                  LOCK rq(0)->lock # busy waiting
    pick_next_task # no more RT tasks on rq                 |
      prev_balance                                          |
        balance_scx                                         |
          balance_one                                       |
            rq->scx.cpu_released = false;                   |
              consume_global_dsq                            |
                consume_dispatch_q                          |
                  consume_remote_task                       |
                    UNLOCK rq(0)->lock                      V
                                                         LOCK rq(0)->lock # succ
                    deactivate_task # SCX1               ttwu_do_activate
                    LOCK rq(0)->lock # busy waiting      activate_task # RT2 enqueued
                       |                                 UNLOCK rq(0)->lock
                       V
                    LOCK rq(0)->lock # succ
                    activate_task # SCX1
      pick_task # RT2 is picked
      put_prev_set_next_task # prev is RT1, next is RT2, rq->scx.cpu_released = false;
UNLOCK rq(0)->lock

As a result, RT2 ends up running on CPU0 with rq->scx.cpu_released still false!

So, add scx_next_task_picked() and check the sched class again to fix the value
of rq->scx.cpu_released.

Signed-off-by: l00013971 <l00013971@...onor.com>
---
 kernel/sched/ext.c   | 24 +++++++++++++++++-------
 kernel/sched/sched.h |  5 +++++
 2 files changed, 22 insertions(+), 7 deletions(-)

diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index f5133249f..f161156be 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -3187,7 +3187,7 @@ preempt_reason_from_class(const struct sched_class *class)
 	return SCX_CPU_PREEMPT_UNKNOWN;
 }
 
-static void switch_class(struct rq *rq, struct task_struct *next)
+static void switch_class(struct rq *rq, struct task_struct *next, bool prev_on_scx)
 {
 	const struct sched_class *next_class = next->sched_class;
 
@@ -3197,7 +3197,8 @@ static void switch_class(struct rq *rq, struct task_struct *next)
 	 * kick_cpus_irq_workfn() who is waiting for this CPU to perform a
 	 * resched.
 	 */
-	smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
+	if (prev_on_scx)
+		smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1);
 #endif
 	if (!static_branch_unlikely(&scx_ops_cpu_preempt))
 		return;
@@ -3233,6 +3234,19 @@ static void switch_class(struct rq *rq, struct task_struct *next)
 	}
 }
 
+void scx_next_task_picked(struct rq *rq, struct task_struct *prev,
+			  struct task_struct *next)
+{
+	bool prev_on_scx = prev && (prev->sched_class == &ext_sched_class);
+
+	if (!scx_enabled() ||
+	    !next ||
+	    next->sched_class == &ext_sched_class)
+		return;
+
+	switch_class(rq, next, prev_on_scx);
+}
+
 static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
 			      struct task_struct *next)
 {
@@ -3253,7 +3267,7 @@ static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
 		 */
 		if (p->scx.slice && !scx_rq_bypassing(rq)) {
 			dispatch_enqueue(&rq->scx.local_dsq, p, SCX_ENQ_HEAD);
-			goto switch_class;
+			return;
 		}
 
 		/*
@@ -3269,10 +3283,6 @@ static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
 			do_enqueue_task(rq, p, 0, -1);
 		}
 	}
-
-switch_class:
-	if (next && next->sched_class != &ext_sched_class)
-		switch_class(rq, next);
 }
 
 static struct task_struct *first_local_task(struct rq *rq)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 47972f34e..f8e1b2173 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1738,12 +1738,15 @@ static inline void scx_rq_clock_invalidate(struct rq *rq)
 	WRITE_ONCE(rq->scx.flags, rq->scx.flags & ~SCX_RQ_CLK_VALID);
 }
 
+void scx_next_task_picked(struct rq *rq, struct task_struct *prev, struct task_struct *next);
 #else /* !CONFIG_SCHED_CLASS_EXT */
 #define scx_enabled()		false
 #define scx_switched_all()	false
 
 static inline void scx_rq_clock_update(struct rq *rq, u64 clock) {}
 static inline void scx_rq_clock_invalidate(struct rq *rq) {}
+static inline void scx_next_task_picked(struct rq *rq, struct task_struct *prev,
+					struct task_struct *next) {}
 #endif /* !CONFIG_SCHED_CLASS_EXT */
 
 /*
@@ -2465,6 +2468,8 @@ static inline void put_prev_set_next_task(struct rq *rq,
 
 	__put_prev_set_next_dl_server(rq, prev, next);
 
+	scx_next_task_picked(rq, prev, next);
+
 	if (next == prev)
 		return;
 
-- 
2.17.1


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ