Message-ID: <174413913226.31282.16525753339504827738.tip-bot2@tip-bot2>
Date: Tue, 08 Apr 2025 19:05:32 -0000
From: tip-bot2 for Michal Koutný <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: mkoutny@...e.com, "Peter Zijlstra (Intel)" <peterz@...radead.org>,
x86@...nel.org, linux-kernel@...r.kernel.org
Subject: [tip: sched/core] sched: Add RT_GROUP WARN checks for non-root task_groups
The following commit has been merged into the sched/core branch of tip:
Commit-ID: 87f1fb77d87a6dac9968a321bb10799ae6d2039c
Gitweb: https://git.kernel.org/tip/87f1fb77d87a6dac9968a321bb10799ae6d2039c
Author: Michal Koutný <mkoutny@...e.com>
AuthorDate: Mon, 10 Mar 2025 18:04:40 +01:00
Committer: Peter Zijlstra <peterz@...radead.org>
CommitterDate: Tue, 08 Apr 2025 20:55:54 +02:00
sched: Add RT_GROUP WARN checks for non-root task_groups
With CONFIG_RT_GROUP_SCHED built in but RT_GROUPs disabled at runtime, we expect
only the root task_group to exist, and every sched_rt_entity should be queued on
the root's rt_rq.
If we encounter a non-root RT_GROUP, something went wrong.
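For illustration only (not part of the patch): the invariant asserted by each of
the new checks below can be read as a single hypothetical helper. The symbols
rt_group_sched_enabled(), root_task_group and struct rt_rq are the existing
kernel names the patch touches; the helper name itself is made up.

	/* Hypothetical helper, shown only to illustrate the asserted invariant. */
	static inline void warn_on_non_root_rt_rq(struct rt_rq *rt_rq)
	{
		/*
		 * With CONFIG_RT_GROUP_SCHED built in but group scheduling
		 * disabled at runtime, every rt_rq must belong to
		 * root_task_group; anything else indicates a bug.
		 */
		WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group);
	}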
Signed-off-by: Michal Koutný <mkoutny@...e.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Link: https://lkml.kernel.org/r/20250310170442.504716-9-mkoutny@suse.com
---
kernel/sched/rt.c | 14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index b611934..778911b 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -176,11 +176,14 @@ static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
+ /* Cannot fold with non-CONFIG_RT_GROUP_SCHED version, layout */
+ WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group);
return rt_rq->rq;
}
static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
+ WARN_ON(!rt_group_sched_enabled() && rt_se->rt_rq->tg != &root_task_group);
return rt_se->rt_rq;
}
@@ -188,6 +191,7 @@ static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
struct rt_rq *rt_rq = rt_se->rt_rq;
+ WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group);
return rt_rq->rq;
}
@@ -504,8 +508,10 @@ typedef struct task_group *rt_rq_iter_t;
static inline struct task_group *next_task_group(struct task_group *tg)
{
- if (!rt_group_sched_enabled())
+ if (!rt_group_sched_enabled()) {
+ WARN_ON(tg != &root_task_group);
return NULL;
+ }
do {
tg = list_entry_rcu(tg->list.next,
@@ -2607,8 +2613,9 @@ static int task_is_throttled_rt(struct task_struct *p, int cpu)
{
struct rt_rq *rt_rq;
-#ifdef CONFIG_RT_GROUP_SCHED
+#ifdef CONFIG_RT_GROUP_SCHED // XXX maybe add task_rt_rq(), see also sched_rt_period_rt_rq
rt_rq = task_group(p)->rt_rq[cpu];
+ WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group);
#else
rt_rq = &cpu_rq(cpu)->rt;
#endif
@@ -2718,6 +2725,9 @@ static int tg_rt_schedulable(struct task_group *tg, void *data)
tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
return -EBUSY;
+ if (WARN_ON(!rt_group_sched_enabled() && tg != &root_task_group))
+ return -EBUSY;
+
total = to_ratio(period, runtime);
/*