[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20251201124205.11169-7-yurand2000@gmail.com>
Date: Mon, 1 Dec 2025 13:41:39 +0100
From: Yuri Andriaccio <yurand2000@...il.com>
To: Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>,
Mel Gorman <mgorman@...e.de>,
Valentin Schneider <vschneid@...hat.com>
Cc: linux-kernel@...r.kernel.org,
Luca Abeni <luca.abeni@...tannapisa.it>,
Yuri Andriaccio <yuri.andriaccio@...tannapisa.it>
Subject: [RFC PATCH v4 06/28] sched/rt: Remove rq field in struct rt_rq
The rq field is now just a cached pointer to the global runqueue of the
given rt_rq. It is therefore unnecessary, since the global runqueue can be
retrieved in other ways.
Signed-off-by: Yuri Andriaccio <yurand2000@...il.com>
---
kernel/sched/rt.c | 7 ++-----
kernel/sched/sched.h | 19 +++++++++++++------
2 files changed, 15 insertions(+), 11 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 74038b27f8..21f4e94235 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -100,10 +100,7 @@ void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
struct sched_rt_entity *rt_se, int cpu,
struct sched_rt_entity *parent)
{
- struct rq *rq = cpu_rq(cpu);
-
rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
- rt_rq->rq = rq;
rt_rq->tg = tg;
tg->rt_rq[cpu] = rt_rq;
@@ -183,7 +180,7 @@ static void pull_rt_task(struct rq *);
static inline void rt_queue_push_tasks(struct rt_rq *rt_rq)
{
- struct rq *rq = container_of(rt_rq, struct rq, rt);
+ struct rq *rq = served_rq_of_rt_rq(rt_rq);
if (!has_pushable_tasks(rt_rq))
return;
@@ -193,7 +190,7 @@ static inline void rt_queue_push_tasks(struct rt_rq *rt_rq)
static inline void rt_queue_pull_task(struct rt_rq *rt_rq)
{
- struct rq *rq = container_of(rt_rq, struct rq, rt);
+ struct rq *rq = served_rq_of_rt_rq(rt_rq);
queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 5c48d6a5e6..1896c4e247 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3074,11 +3074,16 @@ static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
return container_of(rt_se, struct task_struct, rt);
}
+static inline struct rq *served_rq_of_rt_rq(struct rt_rq *rt_rq)
+{
+ WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group);
+ return container_of(rt_rq, struct rq, rt);
+}
+
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
/* Cannot fold with non-CONFIG_RT_GROUP_SCHED version, layout */
- WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group);
- return rt_rq->rq;
+ return cpu_rq(served_rq_of_rt_rq(rt_rq)->cpu);
}
static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
@@ -3089,10 +3094,7 @@ static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
- struct rt_rq *rt_rq = rt_se->rt_rq;
-
- WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group);
- return rt_rq->rq;
+ return rq_of_rt_rq(rt_se->rt_rq);
}
#else
static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
@@ -3100,6 +3102,11 @@ static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
return container_of(rt_se, struct task_struct, rt);
}
+static inline struct rq *served_rq_of_rt_rq(struct rt_rq *rt_rq)
+{
+ return container_of(rt_rq, struct rq, rt);
+}
+
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
return container_of(rt_rq, struct rq, rt);
--
2.51.0
Powered by blists - more mailing lists