Message-ID: <174413913619.31282.1198558368261148151.tip-bot2@tip-bot2>
Date: Tue, 08 Apr 2025 19:05:36 -0000
From: tip-bot2 for Michal Koutný <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: mkoutny@...e.com, "Peter Zijlstra (Intel)" <peterz@...radead.org>,
x86@...nel.org, linux-kernel@...r.kernel.org
Subject: [tip: sched/core] sched: Always initialize rt_rq's task_group
The following commit has been merged into the sched/core branch of tip:
Commit-ID: a5a25b32c08a31c03258ec4960bec26caaf76e9a
Gitweb: https://git.kernel.org/tip/a5a25b32c08a31c03258ec4960bec26caaf76e9a
Author: Michal Koutný <mkoutny@...e.com>
AuthorDate: Mon, 10 Mar 2025 18:04:35 +01:00
Committer: Peter Zijlstra <peterz@...radead.org>
CommitterDate: Tue, 08 Apr 2025 20:55:53 +02:00
sched: Always initialize rt_rq's task_group

rt_rq->tg may be NULL, which denotes the root task_group.
Store the pointer to root_task_group directly so that callers may use
rt_rq->tg homogeneously.

root_task_group always exists with CONFIG_CGROUP_SCHED, and
CONFIG_RT_GROUP_SCHED depends on it.

This changes the root-level rt_rq's default limit from infinity to the
value of the (originally global) RT throttling limit.

Signed-off-by: Michal Koutný <mkoutny@...e.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Link: https://lkml.kernel.org/r/20250310170442.504716-4-mkoutny@suse.com
---
 kernel/sched/rt.c    | 7 ++-----
 kernel/sched/sched.h | 2 ++
 2 files changed, 4 insertions(+), 5 deletions(-)
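
[ Illustration, not part of the patch: a stand-alone user-space sketch of
  the "use rt_rq->tg homogeneously" point from the changelog. The structs
  are heavily simplified, runtime_old()/runtime_new() are made-up names,
  and 950000us is only the stock sched_rt_runtime_us default. ]

#include <stdio.h>
#include <stdint.h>

#define RUNTIME_INF ((uint64_t)~0ULL)

struct task_group { uint64_t rt_runtime; };
struct rt_rq { struct task_group *tg; uint64_t rt_runtime; };

/* Models root_task_group with the default 950000us RT runtime. */
static struct task_group root_task_group = { .rt_runtime = 950000 };

/* Old shape: a NULL tg meant "root group" and had to be special-cased. */
static uint64_t runtime_old(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;
	return rt_rq->rt_runtime;
}

/* New shape: tg always points at a task_group, so it is a plain read. */
static uint64_t runtime_new(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

int main(void)
{
	/* Root rt_rq before: tg == NULL, limit reported as infinity. */
	struct rt_rq root_before = { .tg = NULL, .rt_runtime = 0 };
	/* Root rt_rq after: tg set by init_rt_rq(), limit follows the
	 * (formerly global-only) throttling value. */
	struct rt_rq root_after = {
		.tg = &root_task_group,
		.rt_runtime = root_task_group.rt_runtime,
	};

	printf("before: %llu\n", (unsigned long long)runtime_old(&root_before));
	printf("after:  %llu\n", (unsigned long long)runtime_new(&root_after));
	return 0;
}
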
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 61ec29b..1af3996 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -89,6 +89,7 @@ void init_rt_rq(struct rt_rq *rt_rq)
 	rt_rq->rt_throttled = 0;
 	rt_rq->rt_runtime = 0;
 	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
+	rt_rq->tg = &root_task_group;
 #endif
 }
@@ -482,9 +483,6 @@ static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 {
-	if (!rt_rq->tg)
-		return RUNTIME_INF;
-
 	return rt_rq->rt_runtime;
 }
@@ -1154,8 +1152,7 @@ inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	if (rt_se_boosted(rt_se))
 		rt_rq->rt_nr_boosted++;
 
-	if (rt_rq->tg)
-		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
+	start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
 }
 
 static void
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 47972f3..c006348 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -821,6 +821,8 @@ struct rt_rq {
 	unsigned int		rt_nr_boosted;
 
 	struct rq		*rq;
+#endif
+#ifdef CONFIG_CGROUP_SCHED
 	struct task_group	*tg;
 #endif
 };
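
[ A second small sketch, on the last changelog paragraph: the root rt_rq's
  default limit now follows the RT throttling knobs that used to be global
  only. Assuming the standard /proc layout, these are readable as plain
  sysctl files; stock defaults are 950000us of runtime per 1000000us
  period. ]

#include <stdio.h>

/* Read one integer sysctl value; returns -1 on any error. */
static long read_long(const char *path)
{
	FILE *f = fopen(path, "r");
	long val = -1;

	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	long runtime = read_long("/proc/sys/kernel/sched_rt_runtime_us");
	long period  = read_long("/proc/sys/kernel/sched_rt_period_us");

	printf("sched_rt_runtime_us = %ld\n", runtime);
	printf("sched_rt_period_us  = %ld\n", period);
	return 0;
}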