Message-ID: <20250605071412.139240-5-yurand2000@gmail.com>
Date: Thu, 5 Jun 2025 09:14:07 +0200
From: Yuri Andriaccio <yurand2000@...il.com>
To: Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>,
Mel Gorman <mgorman@...e.de>,
Valentin Schneider <vschneid@...hat.com>
Cc: linux-kernel@...r.kernel.org,
Luca Abeni <luca.abeni@...tannapisa.it>,
Yuri Andriaccio <yuri.andriaccio@...tannapisa.it>
Subject: [RFC PATCH 4/9] sched/rt: Move some inline functions from rt.c to sched.h
From: luca abeni <luca.abeni@...tannapisa.it>
Make the following functions non-static and move them to sched.h, so that they
can also be used in other source files:
- rt_task_of()
- rq_of_rt_rq()
- rt_rq_of_se()
- rq_of_rt_se()
There are no functional changes. This is needed by future patches.
Signed-off-by: luca abeni <luca.abeni@...tannapisa.it>
---
kernel/sched/rt.c | 52 --------------------------------------------
kernel/sched/sched.h | 51 +++++++++++++++++++++++++++++++++++++++++++
2 files changed, 51 insertions(+), 52 deletions(-)
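
For illustration only, not part of the patch: a minimal sketch of how another
kernel/sched/ source file could use these accessors once they are visible
through sched.h. The helper name rt_se_cpu() is hypothetical and exists only
for this example; cpu_of() is the existing runqueue-to-CPU accessor already
defined in sched.h.

#include "sched.h"

/*
 * Hypothetical helper, for illustration only: report which CPU the
 * runqueue behind an RT entity belongs to, using the accessors that
 * this patch makes visible outside rt.c.
 */
static inline int rt_se_cpu(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);	/* rt_rq the entity is queued on */
	struct rq *rq = rq_of_rt_rq(rt_rq);		/* CPU runqueue owning that rt_rq */

	return cpu_of(rq);				/* CPU id of that runqueue */
}

Keeping the accessors as inlines in sched.h avoids any function-call cost on
these hot lookups while letting other compilation units share the
group-scheduling-aware definitions.
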
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 046a89fc7..382126274 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -167,34 +167,6 @@ static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
-static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
-{
- WARN_ON_ONCE(!rt_entity_is_task(rt_se));
-
- return container_of(rt_se, struct task_struct, rt);
-}
-
-static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
-{
- /* Cannot fold with non-CONFIG_RT_GROUP_SCHED version, layout */
- WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group);
- return rt_rq->rq;
-}
-
-static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
-{
- WARN_ON(!rt_group_sched_enabled() && rt_se->rt_rq->tg != &root_task_group);
- return rt_se->rt_rq;
-}
-
-static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
-{
- struct rt_rq *rt_rq = rt_se->rt_rq;
-
- WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group);
- return rt_rq->rq;
-}
-
void unregister_rt_sched_group(struct task_group *tg)
{
if (!rt_group_sched_enabled())
@@ -295,30 +267,6 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
#define rt_entity_is_task(rt_se) (1)
-static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
-{
- return container_of(rt_se, struct task_struct, rt);
-}
-
-static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
-{
- return container_of(rt_rq, struct rq, rt);
-}
-
-static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
-{
- struct task_struct *p = rt_task_of(rt_se);
-
- return task_rq(p);
-}
-
-static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
-{
- struct rq *rq = rq_of_rt_se(rt_se);
-
- return &rq->rt;
-}
-
void unregister_rt_sched_group(struct task_group *tg) { }
void free_rt_sched_group(struct task_group *tg) { }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 755ff5734..439a95239 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3128,6 +3128,57 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
#endif /* !CONFIG_SMP */
+#ifdef CONFIG_RT_GROUP_SCHED
+static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
+{
+#ifdef CONFIG_SCHED_DEBUG
+ WARN_ON_ONCE(rt_se->my_q);
+#endif
+ return container_of(rt_se, struct task_struct, rt);
+}
+
+static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+{
+ return rt_rq->rq;
+}
+
+static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+{
+ return rt_se->rt_rq;
+}
+
+static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
+{
+ struct rt_rq *rt_rq = rt_se->rt_rq;
+
+ return rt_rq->rq;
+}
+#else
+static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
+{
+ return container_of(rt_se, struct task_struct, rt);
+}
+
+static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
+{
+ return container_of(rt_rq, struct rq, rt);
+}
+
+static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
+{
+ struct task_struct *p = rt_task_of(rt_se);
+
+ return task_rq(p);
+}
+
+static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
+{
+ struct rq *rq = rq_of_rt_se(rt_se);
+
+ return &rq->rt;
+}
+#endif
+
DEFINE_LOCK_GUARD_2(double_rq_lock, struct rq,
double_rq_lock(_T->lock, _T->lock2),
double_rq_unlock(_T->lock, _T->lock2))
--
2.49.0