[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20231115113341.13261-3-CruzZhao@linux.alibaba.com>
Date: Wed, 15 Nov 2023 19:33:39 +0800
From: Cruz Zhao <CruzZhao@...ux.alibaba.com>
To: mingo@...hat.com, peterz@...radead.org, juri.lelli@...hat.com,
vincent.guittot@...aro.org, dietmar.eggemann@....com,
rostedt@...dmis.org, bsegall@...gle.com, mgorman@...e.de,
bristot@...hat.com, vschneid@...hat.com, joel@...lfernandes.org
Cc: linux-kernel@...r.kernel.org
Subject: [PATCH 2/4] sched/core: introduce core to struct cfs_rq
Introduce a core field to struct cfs_rq, which points to the cfs_rq of
rq->core, i.e. the corresponding cfs_rq on the core's leader rq.
Signed-off-by: Cruz Zhao <CruzZhao@...ux.alibaba.com>
---
kernel/sched/core.c | 4 ++++
kernel/sched/fair.c | 11 +++++++++++
kernel/sched/sched.h | 1 +
3 files changed, 16 insertions(+)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7a685fae73c4..647a12af9172 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6432,6 +6432,7 @@ static void sched_core_cpu_starting(unsigned int cpu)
if (t == cpu) {
rq->core = core_rq;
rq->core_id = core_id;
+ rq->cfs.core = &core_rq->cfs;
}
WARN_ON_ONCE(rq->core != core_rq);
@@ -6488,6 +6489,7 @@ static void sched_core_cpu_deactivate(unsigned int cpu)
rq = cpu_rq(t);
rq->core = core_rq;
rq->core_id = core_id;
+ rq->cfs.core = &core_rq->cfs;
}
}
@@ -6498,6 +6500,7 @@ static inline void sched_core_cpu_dying(unsigned int cpu)
if (rq->core != rq) {
rq->core = rq;
rq->core_id = cpu;
+ rq->cfs.core = &rq->cfs;
}
}
@@ -10016,6 +10019,7 @@ void __init sched_init(void)
#ifdef CONFIG_SCHED_CORE
rq->core = rq;
rq->core_id = i;
+ rq->cfs.core = &rq->cfs;
rq->core_pick = NULL;
rq->core_enabled = 0;
rq->core_tree = RB_ROOT;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2048138ce54b..61cbaa3cc385 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -12420,6 +12420,16 @@ bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
return delta > 0;
}
+void sched_core_init_cfs_rq(struct task_group *tg, struct cfs_rq *cfs_rq)
+{
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ struct rq *rq = rq_of(cfs_rq);
+ int core_id = rq->core_id;
+
+ cfs_rq->core = tg->cfs_rq[core_id];
+#endif
+}
+
static int task_is_throttled_fair(struct task_struct *p, int cpu)
{
struct cfs_rq *cfs_rq;
@@ -12715,6 +12725,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
init_cfs_rq(cfs_rq);
init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
+ sched_core_init_cfs_rq(tg, cfs_rq);
init_entity_runnable_average(se);
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1b62165fc840..62fca54223a1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -547,6 +547,7 @@ struct cfs_rq {
#ifdef CONFIG_SCHED_CORE
unsigned int forceidle_seq;
u64 min_vruntime_fi;
+ struct cfs_rq *core;
#endif
#ifndef CONFIG_64BIT
--
2.39.3
Powered by blists - more mailing lists