Message-ID: <20251201124205.11169-29-yurand2000@gmail.com>
Date: Mon,  1 Dec 2025 13:42:01 +0100
From: Yuri Andriaccio <yurand2000@...il.com>
To: Ingo Molnar <mingo@...hat.com>,
	Peter Zijlstra <peterz@...radead.org>,
	Juri Lelli <juri.lelli@...hat.com>,
	Vincent Guittot <vincent.guittot@...aro.org>,
	Dietmar Eggemann <dietmar.eggemann@....com>,
	Steven Rostedt <rostedt@...dmis.org>,
	Ben Segall <bsegall@...gle.com>,
	Mel Gorman <mgorman@...e.de>,
	Valentin Schneider <vschneid@...hat.com>
Cc: linux-kernel@...r.kernel.org,
	Luca Abeni <luca.abeni@...tannapisa.it>,
	Yuri Andriaccio <yuri.andriaccio@...tannapisa.it>
Subject: [RFC PATCH v4 28/28] [DEBUG] sched/rt: Add debug BUG_ONs in migration code.

Add debug BUG_ONs to the group-specific migration functions. This patch
can be safely applied after all the migration patches.

These are extra asserts which are only useful for debugging the kernel
code; they are not meant to be part of the final patchset.

Signed-off-by: Yuri Andriaccio <yurand2000@...il.com>
---
 kernel/sched/rt.c | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)
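
For reference, the group-path asserts below all encode the same
invariants: the rt_rq exists, it is served by a dl_server, and the rq
serving the group is distinct from the per-CPU global rq. A minimal
sketch of how they could be collected into one helper (the helper name
is illustrative, not part of this patch):

	/* Sketch only: invariants asserted on the group migration paths. */
	static inline void group_migration_asserts(struct rt_rq *rt_rq)
	{
		/* Must be an rt_rq that is served by a dl_server ... */
		BUG_ON(rt_rq == NULL);
		BUG_ON(!is_dl_group(rt_rq));
		/* ... and its served rq never aliases the per-CPU global rq. */
		BUG_ON(served_rq_of_rt_rq(rt_rq) ==
		       cpu_rq(served_rq_of_rt_rq(rt_rq)->cpu));
	}

The global paths (push_rt_task/pull_rt_task) assert the converse,
BUG_ON(is_dl_group(&rq->rt)), i.e. they only ever operate on the root
rt_rq of a CPU runqueue.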

diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 435d147aa5..1a777298f9 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -349,6 +349,9 @@ static void rt_queue_push_from_group(struct rt_rq *rt_rq)
+	BUG_ON(rt_rq == NULL);
 	struct rq *rq = served_rq_of_rt_rq(rt_rq);
 	struct rq *global_rq = cpu_rq(rq->cpu);

+	BUG_ON(rq == global_rq);
+
 	if (global_rq->rq_to_push_from)
 		return;

@@ -366,6 +369,10 @@ static void rt_queue_pull_to_group(struct rt_rq *rt_rq)
+	BUG_ON(rt_rq == NULL);
+	BUG_ON(!is_dl_group(rt_rq));
 	struct rq *global_rq = cpu_rq(rq->cpu);
 	struct sched_dl_entity *dl_se = dl_group_of(rt_rq);

+	BUG_ON(rq == global_rq);
+
 	if (dl_se->dl_throttled || global_rq->rq_to_pull_to)
 		return;

@@ -1393,6 +1400,8 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
  */
 static int push_rt_task(struct rq *rq, bool pull)
 {
+	BUG_ON(is_dl_group(&rq->rt));
+
 	struct task_struct *next_task;
 	struct rq *lowest_rq;
 	int ret = 0;
@@ -1689,6 +1698,8 @@ void rto_push_irq_work_func(struct irq_work *work)

 static void pull_rt_task(struct rq *this_rq)
 {
+	BUG_ON(is_dl_group(&this_rq->rt));
+
 	int this_cpu = this_rq->cpu, cpu;
 	bool resched = false;
 	struct task_struct *p, *push_task;
@@ -1813,6 +1824,8 @@ static int group_find_lowest_rt_rq(struct task_struct *task, struct rt_rq *task_
 	int prio, lowest_prio;
 	int cpu, this_cpu = smp_processor_id();

+	BUG_ON(task->sched_task_group != task_rt_rq->tg);
+
 	if (task->nr_cpus_allowed == 1)
 		return -1; /* No other targets possible */

@@ -1915,6 +1928,8 @@ static struct rt_rq *group_find_lock_lowest_rt_rq(struct task_struct *task, stru
 	struct sched_dl_entity *lowest_dl_se;
 	int tries, cpu;

+	BUG_ON(task->sched_task_group != rt_rq->tg);
+
 	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
 		cpu = group_find_lowest_rt_rq(task, rt_rq);

@@ -1971,6 +1986,8 @@ static struct rt_rq *group_find_lock_lowest_rt_rq(struct task_struct *task, stru

 static int group_push_rt_task(struct rt_rq *rt_rq, bool pull)
 {
+	BUG_ON(!is_dl_group(rt_rq));
+
 	struct rq *rq = rq_of_rt_rq(rt_rq);
 	struct task_struct *next_task;
 	struct rq *lowest_rq;
@@ -2090,6 +2107,8 @@ static int group_push_rt_task(struct rt_rq *rt_rq, bool pull)

 static void group_pull_rt_task(struct rt_rq *this_rt_rq)
 {
+	BUG_ON(!is_dl_group(this_rt_rq));
+
 	struct rq *this_rq = rq_of_rt_rq(this_rt_rq);
 	int this_cpu = this_rq->cpu, cpu;
 	bool resched = false;
@@ -2202,6 +2221,9 @@ static void group_push_rt_tasks_callback(struct rq *global_rq)
 {
+	BUG_ON(global_rq->rq_to_push_from == NULL);
 	struct rt_rq *rt_rq = &global_rq->rq_to_push_from->rt;

+	BUG_ON(served_rq_of_rt_rq(rt_rq) == global_rq);
+
 	if ((rt_rq->rt_nr_running > 1) ||
 	    (dl_group_of(rt_rq)->dl_throttled == 1)) {

@@ -2216,6 +2238,9 @@ static void group_pull_rt_task_callback(struct rq *global_rq)
 {
+	BUG_ON(global_rq->rq_to_pull_to == NULL);
 	struct rt_rq *rt_rq = &global_rq->rq_to_pull_to->rt;

+	BUG_ON(served_rq_of_rt_rq(rt_rq) == global_rq);
+
 	group_pull_rt_task(rt_rq);
 	global_rq->rq_to_pull_to = NULL;
 }
--
2.51.0

