Message-ID: <20250903194503.1679687-3-zecheng@google.com>
Date: Wed, 3 Sep 2025 19:45:01 +0000
From: Zecheng Li <zecheng@...gle.com>
To: Ingo Molnar <mingo@...hat.com>, Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...hat.com>, Vincent Guittot <vincent.guittot@...aro.org>
Cc: Dietmar Eggemann <dietmar.eggemann@....com>, Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>, Mel Gorman <mgorman@...e.de>,
Valentin Schneider <vschneid@...hat.com>, Rik van Riel <riel@...riel.com>, Chris Mason <clm@...com>,
Madadi Vineeth Reddy <vineethr@...ux.ibm.com>, Xu Liu <xliuprof@...gle.com>,
Blake Jones <blakejones@...gle.com>, Josh Don <joshdon@...gle.com>, linux-kernel@...r.kernel.org,
Zecheng Li <zecheng@...gle.com>
Subject: [PATCH v4 2/3] sched/fair: Remove task_group->se pointer array
Now that struct sched_entity is co-located with struct cfs_rq for
non-root task groups, the task_group->se pointer array is redundant. The
associated sched_entity can be loaded directly from the cfs_rq.
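For context, the previous patch in this series places each non-root group's sched_entity in the same allocation as its cfs_rq. Based on the container_of() usage in the hunks below, the combined structure looks roughly like this (a sketch only; the exact definition, including any alignment annotations, lives in the previous patch):

  struct cfs_rq_with_se {
          struct cfs_rq           cfs_rq;
          /* the group's sched_entity on this CPU, embedded after its cfs_rq */
          struct sched_entity     se;
  };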
This patch performs the access conversion with the following helpers:
- is_root_task_group(tg): checks whether a task group is the root task
group by comparing its address against the global root_task_group
variable.
- tg_se(tg, cpu): retrieves the per-CPU cfs_rq and returns the address
of the co-located se. It checks whether tg is the root task group so
that it behaves the same as the previous tg->se[cpu] (i.e. returns NULL
for the root). All accesses through the tg->se[cpu] pointer array are
replaced with calls to this accessor (see the before/after sketch after
this list).
- cfs_rq_se(cfs_rq): simplifies access paths such as cfs_rq->tg->se[...]
by returning the co-located sched_entity. It performs the same root
task group check to preserve the previous behavior.
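As an illustrative before/after (mirroring the hunks below), a per-CPU lookup that used to go through the pointer array now uses the accessor:

  /* before: dereference the per-CPU pointer array in struct task_group */
  struct sched_entity *se = tg->se[cpu];

  /* after: derive the co-located entity from the cfs_rq; NULL for the root task group */
  struct sched_entity *se = tg_se(tg, cpu);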
Since tg_se() is not used on very hot code paths, and the added branch
is a register comparison against an immediate value (`&root_task_group`),
the performance impact is expected to be negligible.
Signed-off-by: Zecheng Li <zecheng@...gle.com>
---
kernel/sched/core.c | 7 ++-----
kernel/sched/debug.c | 2 +-
kernel/sched/fair.c | 27 ++++++++++-----------------
kernel/sched/sched.h | 29 ++++++++++++++++++++++++-----
4 files changed, 37 insertions(+), 28 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index feb750aae71b..11efddf383ed 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8691,7 +8691,7 @@ void __init sched_init(void)
wait_bit_init();
#ifdef CONFIG_FAIR_GROUP_SCHED
- ptr += 2 * nr_cpu_ids * sizeof(void **);
+ ptr += nr_cpu_ids * sizeof(void **);
#endif
#ifdef CONFIG_RT_GROUP_SCHED
ptr += 2 * nr_cpu_ids * sizeof(void **);
@@ -8700,9 +8700,6 @@ void __init sched_init(void)
ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
#ifdef CONFIG_FAIR_GROUP_SCHED
- root_task_group.se = (struct sched_entity **)ptr;
- ptr += nr_cpu_ids * sizeof(void **);
-
root_task_group.cfs_rq = (struct cfs_rq **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
@@ -9785,7 +9782,7 @@ static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
int i;
for_each_possible_cpu(i) {
- stats = __schedstats_from_se(tg->se[i]);
+ stats = __schedstats_from_se(tg_se(tg, i));
ws += schedstat_val(stats->wait_sum);
}
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 02e16b70a790..16542596d4b0 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -644,7 +644,7 @@ void dirty_sched_domain_sysctl(int cpu)
#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
- struct sched_entity *se = tg->se[cpu];
+ struct sched_entity *se = tg_se(tg, cpu);
#define P(F) SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)F)
#define P_SCHEDSTAT(F) SEQ_printf(m, " .%-30s: %lld\n", \
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fc8bc2d4614f..8777e288f0e0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6014,7 +6014,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
{
struct rq *rq = rq_of(cfs_rq);
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
- struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
+ struct sched_entity *se;
/*
* It's possible we are called with !runtime_remaining due to things
@@ -6029,7 +6029,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
if (cfs_rq->runtime_enabled && cfs_rq->runtime_remaining <= 0)
return;
- se = cfs_rq->tg->se[cpu_of(rq)];
+ se = cfs_rq_se(cfs_rq);
cfs_rq->throttled = 0;
@@ -9763,7 +9763,6 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
{
struct cfs_rq *cfs_rq, *pos;
bool decayed = false;
- int cpu = cpu_of(rq);
/*
* Iterates the task_group tree in a bottom up fashion, see
@@ -9783,7 +9782,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
}
/* Propagate pending load changes to the parent, if any: */
- se = cfs_rq->tg->se[cpu];
+ se = cfs_rq_se(cfs_rq);
if (se && !skip_blocked_update(se))
update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
@@ -9809,8 +9808,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
*/
static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
{
- struct rq *rq = rq_of(cfs_rq);
- struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
+ struct sched_entity *se = cfs_rq_se(cfs_rq);
unsigned long now = jiffies;
unsigned long load;
@@ -13329,7 +13327,6 @@ void free_fair_sched_group(struct task_group *tg)
}
kfree(tg->cfs_rq);
- kfree(tg->se);
}
int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
@@ -13342,9 +13339,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL);
if (!tg->cfs_rq)
goto err;
- tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL);
- if (!tg->se)
- goto err;
tg->shares = NICE_0_LOAD;
@@ -13359,7 +13353,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
cfs_rq = &combined->cfs_rq;
se = &combined->se;
init_cfs_rq(cfs_rq);
- init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
+ init_tg_cfs_entry(tg, cfs_rq, se, i, tg_se(parent, i));
init_entity_runnable_average(se);
}
@@ -13378,7 +13372,7 @@ void online_fair_sched_group(struct task_group *tg)
for_each_possible_cpu(i) {
rq = cpu_rq(i);
- se = tg->se[i];
+ se = tg_se(tg, i);
rq_lock_irq(rq, &rf);
update_rq_clock(rq);
attach_entity_cfs_rq(se);
@@ -13395,7 +13389,7 @@ void unregister_fair_sched_group(struct task_group *tg)
for_each_possible_cpu(cpu) {
struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
- struct sched_entity *se = tg->se[cpu];
+ struct sched_entity *se = tg_se(tg, cpu);
struct rq *rq = cpu_rq(cpu);
if (se) {
@@ -13432,7 +13426,6 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
init_cfs_rq_runtime(cfs_rq);
tg->cfs_rq[cpu] = cfs_rq;
- tg->se[cpu] = se;
/* se could be NULL for root_task_group */
if (!se)
@@ -13463,7 +13456,7 @@ static int __sched_group_set_shares(struct task_group *tg, unsigned long shares)
/*
* We can't change the weight of the root cgroup.
*/
- if (!tg->se[0])
+ if (is_root_task_group(tg))
return -EINVAL;
shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
@@ -13474,7 +13467,7 @@ static int __sched_group_set_shares(struct task_group *tg, unsigned long shares)
tg->shares = shares;
for_each_possible_cpu(i) {
struct rq *rq = cpu_rq(i);
- struct sched_entity *se = tg->se[i];
+ struct sched_entity *se = tg_se(tg, i);
struct rq_flags rf;
/* Propagate contribution to hierarchy */
@@ -13525,7 +13518,7 @@ int sched_group_set_idle(struct task_group *tg, long idle)
for_each_possible_cpu(i) {
struct rq *rq = cpu_rq(i);
- struct sched_entity *se = tg->se[i];
+ struct sched_entity *se = tg_se(tg, i);
struct cfs_rq *grp_cfs_rq = tg->cfs_rq[i];
bool was_idle = cfs_rq_is_idle(grp_cfs_rq);
long idle_task_delta;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 8b37de788eeb..2b350c25f116 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -452,8 +452,6 @@ struct task_group {
#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
- /* schedulable entities of this group on each CPU */
- struct sched_entity **se;
/* runqueue "owned" by this group on each CPU */
struct cfs_rq **cfs_rq;
unsigned long shares;
@@ -899,7 +897,8 @@ struct dl_rq {
};
#ifdef CONFIG_FAIR_GROUP_SCHED
-
+/* Check whether a task group is root tg */
+#define is_root_task_group(tg) ((tg) == &root_task_group)
/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se) (!se->my_q)
@@ -1583,6 +1582,26 @@ static inline struct task_struct *task_of(struct sched_entity *se)
return container_of(se, struct task_struct, se);
}
+static inline struct sched_entity *tg_se(struct task_group *tg, int cpu)
+{
+ if (is_root_task_group(tg))
+ return NULL;
+
+ struct cfs_rq_with_se *combined =
+ container_of(tg->cfs_rq[cpu], struct cfs_rq_with_se, cfs_rq);
+ return &combined->se;
+}
+
+static inline struct sched_entity *cfs_rq_se(struct cfs_rq *cfs_rq)
+{
+ if (is_root_task_group(cfs_rq->tg))
+ return NULL;
+
+ struct cfs_rq_with_se *combined =
+ container_of(cfs_rq, struct cfs_rq_with_se, cfs_rq);
+ return &combined->se;
+}
+
static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
return p->se.cfs_rq;
@@ -2151,8 +2170,8 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
#ifdef CONFIG_FAIR_GROUP_SCHED
set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
p->se.cfs_rq = tg->cfs_rq[cpu];
- p->se.parent = tg->se[cpu];
- p->se.depth = tg->se[cpu] ? tg->se[cpu]->depth + 1 : 0;
+ p->se.parent = tg_se(tg, cpu);
+ p->se.depth = p->se.parent ? p->se.parent->depth + 1 : 0;
#endif
#ifdef CONFIG_RT_GROUP_SCHED
--
2.51.0.338.gd7d06c2dae-goog