Message-ID: <20250609193834.2556866-4-zecheng@google.com>
Date: Mon, 9 Jun 2025 19:38:33 +0000
From: Zecheng Li <zecheng@...gle.com>
To: Ingo Molnar <mingo@...hat.com>, Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...hat.com>, Vincent Guittot <vincent.guittot@...aro.org>
Cc: Dietmar Eggemann <dietmar.eggemann@....com>, Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>, Mel Gorman <mgorman@...e.de>,
Valentin Schneider <vschneid@...hat.com>, Xu Liu <xliuprof@...gle.com>,
Blake Jones <blakejones@...gle.com>, Josh Don <joshdon@...gle.com>, linux-kernel@...r.kernel.org,
Zecheng Li <zecheng@...gle.com>
Subject: [RFC PATCH v2 3/3] sched/fair: Allocate the combined cfs_rq/se struct per-cpu
To remove the cfs_rq pointer array in task_group, allocate the combined
cfs_rq and sched_entity using the per-cpu allocator.

This patch implements the following:
- Changes task_group->cfs_rq from struct cfs_rq ** to struct cfs_rq
__percpu *.
- Updates memory allocation in alloc_fair_sched_group() and
free_fair_sched_group() to use alloc_percpu() and free_percpu()
respectively.
- Adds an inline accessor tg_cfs_rq(tg, cpu) that uses per_cpu_ptr() to
  retrieve the cfs_rq of the given task group on the given CPU.
- Replaces direct tg->cfs_rq[cpu] accesses with calls to the new
  tg_cfs_rq(tg, cpu) helper.
- Handles the root_task_group: since struct rq is already a per-cpu
variable (runqueues), its embedded cfs_rq (rq->cfs) is also per-cpu.
Therefore, we assign root_task_group.cfs_rq = &runqueues.cfs.
- Cleans up the root task group initialization code.
This change places each CPU's cfs_rq and sched_entity in its local
per-cpu memory area; a condensed sketch of the resulting layout and
accessor follows.
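For reference, a minimal sketch of the per-CPU layout and access pattern
after this patch (struct cfs_rq_with_se comes from earlier patches in
this series; its exact field order is assumed here for illustration):

	/* One combined cfs_rq + sched_entity block per CPU per task group;
	 * field order assumed for illustration. */
	struct cfs_rq_with_se {
		struct cfs_rq		cfs_rq;
		struct sched_entity	se;
	};

	/* task_group keeps a per-cpu pointer instead of a pointer array */
	struct cfs_rq __percpu *cfs_rq;

	/* Access a specific CPU's cfs_rq from a task group */
	static inline struct cfs_rq *tg_cfs_rq(struct task_group *tg, int cpu)
	{
		return per_cpu_ptr(tg->cfs_rq, cpu);
	}

	/* Allocation becomes a single per-cpu allocation per task group */
	struct cfs_rq_with_se __percpu *combined =
		alloc_percpu_gfp(struct cfs_rq_with_se, GFP_KERNEL);
	tg->cfs_rq = &combined->cfs_rq;
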
Signed-off-by: Zecheng Li <zecheng@...gle.com>
---
kernel/sched/core.c | 35 ++++++++++----------------
kernel/sched/fair.c | 59 +++++++++++++++++---------------------------
kernel/sched/sched.h | 13 +++++++---
3 files changed, 45 insertions(+), 62 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8598492854fc..60b9872e4b01 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8526,7 +8526,7 @@ static struct kmem_cache *task_group_cache __ro_after_init;
void __init sched_init(void)
{
- unsigned long ptr = 0;
+ unsigned long __maybe_unused ptr = 0;
int i;
/* Make sure the linker didn't screw up */
@@ -8544,33 +8544,24 @@ void __init sched_init(void)
wait_bit_init();
#ifdef CONFIG_FAIR_GROUP_SCHED
- ptr += nr_cpu_ids * sizeof(void **);
-#endif
-#ifdef CONFIG_RT_GROUP_SCHED
- ptr += 2 * nr_cpu_ids * sizeof(void **);
-#endif
- if (ptr) {
- ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
+ root_task_group.cfs_rq = &runqueues.cfs;
-#ifdef CONFIG_FAIR_GROUP_SCHED
- root_task_group.cfs_rq = (struct cfs_rq **)ptr;
- ptr += nr_cpu_ids * sizeof(void **);
-
- root_task_group.shares = ROOT_TASK_GROUP_LOAD;
- init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
+ root_task_group.shares = ROOT_TASK_GROUP_LOAD;
+ init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_EXT_GROUP_SCHED
- root_task_group.scx_weight = CGROUP_WEIGHT_DFL;
+ root_task_group.scx_weight = CGROUP_WEIGHT_DFL;
#endif /* CONFIG_EXT_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
- root_task_group.rt_se = (struct sched_rt_entity **)ptr;
- ptr += nr_cpu_ids * sizeof(void **);
+ ptr += 2 * nr_cpu_ids * sizeof(void **);
+ ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT);
+ root_task_group.rt_se = (struct sched_rt_entity **)ptr;
+ ptr += nr_cpu_ids * sizeof(void **);
- root_task_group.rt_rq = (struct rt_rq **)ptr;
- ptr += nr_cpu_ids * sizeof(void **);
+ root_task_group.rt_rq = (struct rt_rq **)ptr;
+ ptr += nr_cpu_ids * sizeof(void **);
#endif /* CONFIG_RT_GROUP_SCHED */
- }
#ifdef CONFIG_SMP
init_defrootdomain();
@@ -9511,7 +9502,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota,
}
for_each_online_cpu(i) {
- struct cfs_rq *cfs_rq = tg->cfs_rq[i];
+ struct cfs_rq *cfs_rq = tg_cfs_rq(tg, i);
struct rq *rq = cfs_rq->rq;
guard(rq_lock_irq)(rq);
@@ -9759,7 +9750,7 @@ static u64 throttled_time_self(struct task_group *tg)
u64 total = 0;
for_each_possible_cpu(i) {
- total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time);
+ total += READ_ONCE(tg_cfs_rq(tg, i)->throttled_clock_self_time);
}
return total;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b099b593f364..8c13bc1bbe08 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -329,7 +329,7 @@ static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
* to a tree or when we reach the top of the tree
*/
if (cfs_rq->tg->parent &&
- cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
+ tg_cfs_rq(cfs_rq->tg->parent, cpu)->on_list) {
/*
* If parent is already on the list, we add the child
* just before. Thanks to circular linked property of
@@ -337,7 +337,7 @@ static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
* of the list that starts by parent.
*/
list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
- &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
+ &(tg_cfs_rq(cfs_rq->tg->parent, cpu)->leaf_cfs_rq_list));
/*
* The branch is now connected to its tree so we can
* reset tmp_alone_branch to the beginning of the
@@ -4168,7 +4168,7 @@ static void __maybe_unused clear_tg_offline_cfs_rqs(struct rq *rq)
rcu_read_lock();
list_for_each_entry_rcu(tg, &task_groups, list) {
- struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
+ struct cfs_rq *cfs_rq = tg_cfs_rq(tg, cpu_of(rq));
clear_tg_load_avg(cfs_rq);
}
@@ -5823,8 +5823,8 @@ static inline int throttled_lb_pair(struct task_group *tg,
{
struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
- src_cfs_rq = tg->cfs_rq[src_cpu];
- dest_cfs_rq = tg->cfs_rq[dest_cpu];
+ src_cfs_rq = tg_cfs_rq(tg, src_cpu);
+ dest_cfs_rq = tg_cfs_rq(tg, dest_cpu);
return throttled_hierarchy(src_cfs_rq) ||
throttled_hierarchy(dest_cfs_rq);
@@ -5833,7 +5833,7 @@ static inline int throttled_lb_pair(struct task_group *tg,
static int tg_unthrottle_up(struct task_group *tg, void *data)
{
struct rq *rq = data;
- struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
+ struct cfs_rq *cfs_rq = tg_cfs_rq(tg, cpu_of(rq));
cfs_rq->throttle_count--;
if (!cfs_rq->throttle_count) {
@@ -5862,7 +5862,7 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
static int tg_throttle_down(struct task_group *tg, void *data)
{
struct rq *rq = data;
- struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
+ struct cfs_rq *cfs_rq = tg_cfs_rq(tg, cpu_of(rq));
/* group is entering throttled state, stop time */
if (!cfs_rq->throttle_count) {
@@ -6449,8 +6449,8 @@ static void sync_throttle(struct task_group *tg, int cpu)
if (!tg->parent)
return;
- cfs_rq = tg->cfs_rq[cpu];
- pcfs_rq = tg->parent->cfs_rq[cpu];
+ cfs_rq = tg_cfs_rq(tg, cpu);
+ pcfs_rq = tg_cfs_rq(tg->parent, cpu);
cfs_rq->throttle_count = pcfs_rq->throttle_count;
cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu));
@@ -6635,7 +6635,7 @@ static void __maybe_unused update_runtime_enabled(struct rq *rq)
rcu_read_lock();
list_for_each_entry_rcu(tg, &task_groups, list) {
struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
- struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
+ struct cfs_rq *cfs_rq = tg_cfs_rq(tg, cpu_of(rq));
raw_spin_lock(&cfs_b->lock);
cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
@@ -6664,7 +6664,7 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
rcu_read_lock();
list_for_each_entry_rcu(tg, &task_groups, list) {
- struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
+ struct cfs_rq *cfs_rq = tg_cfs_rq(tg, cpu_of(rq));
if (!cfs_rq->runtime_enabled)
continue;
@@ -9364,7 +9364,7 @@ static inline int task_is_ineligible_on_dst_cpu(struct task_struct *p, int dest_
struct cfs_rq *dst_cfs_rq;
#ifdef CONFIG_FAIR_GROUP_SCHED
- dst_cfs_rq = task_group(p)->cfs_rq[dest_cpu];
+ dst_cfs_rq = tg_cfs_rq(task_group(p), dest_cpu);
#else
dst_cfs_rq = &cpu_rq(dest_cpu)->cfs;
#endif
@@ -13080,7 +13080,7 @@ static int task_is_throttled_fair(struct task_struct *p, int cpu)
struct cfs_rq *cfs_rq;
#ifdef CONFIG_FAIR_GROUP_SCHED
- cfs_rq = task_group(p)->cfs_rq[cpu];
+ cfs_rq = tg_cfs_rq(task_group(p), cpu);
#else
cfs_rq = &cpu_rq(cpu)->cfs;
#endif
@@ -13336,42 +13336,31 @@ static void task_change_group_fair(struct task_struct *p)
void free_fair_sched_group(struct task_group *tg)
{
- int i;
-
- for_each_possible_cpu(i) {
- if (tg->cfs_rq && tg->cfs_rq[i]) {
- struct cfs_rq_with_se *combined =
- container_of(tg->cfs_rq[i], struct cfs_rq_with_se, cfs_rq);
- kfree(combined);
- }
- }
-
- kfree(tg->cfs_rq);
+ free_percpu(tg->cfs_rq);
}
int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
- struct cfs_rq_with_se *combined;
+ struct cfs_rq_with_se __percpu *combined;
struct sched_entity *se;
struct cfs_rq *cfs_rq;
int i;
- tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL);
- if (!tg->cfs_rq)
+ combined = alloc_percpu_gfp(struct cfs_rq_with_se, GFP_KERNEL);
+ if (!combined)
goto err;
+ tg->cfs_rq = &combined->cfs_rq;
tg->shares = NICE_0_LOAD;
init_cfs_bandwidth(tg_cfs_bandwidth(tg), tg_cfs_bandwidth(parent));
for_each_possible_cpu(i) {
- combined = kzalloc_node(sizeof(struct cfs_rq_with_se),
- GFP_KERNEL, cpu_to_node(i));
- if (!combined)
+ cfs_rq = tg_cfs_rq(tg, i);
+ if (!cfs_rq)
goto err;
- cfs_rq = &combined->cfs_rq;
- se = &combined->se;
+ se = tg_se(tg, i);
init_cfs_rq(cfs_rq);
init_tg_cfs_entry(tg, cfs_rq, se, i, tg_se(parent, i));
init_entity_runnable_average(se);
@@ -13408,7 +13397,7 @@ void unregister_fair_sched_group(struct task_group *tg)
destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
for_each_possible_cpu(cpu) {
- struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
+ struct cfs_rq *cfs_rq = tg_cfs_rq(tg, cpu);
struct sched_entity *se = tg_se(tg, cpu);
struct rq *rq = cpu_rq(cpu);
@@ -13445,8 +13434,6 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
cfs_rq->rq = rq;
init_cfs_rq_runtime(cfs_rq);
- tg->cfs_rq[cpu] = cfs_rq;
-
/* se could be NULL for root_task_group */
if (!se)
return;
@@ -13539,7 +13526,7 @@ int sched_group_set_idle(struct task_group *tg, long idle)
for_each_possible_cpu(i) {
struct rq *rq = cpu_rq(i);
struct sched_entity *se = tg_se(tg, i);
- struct cfs_rq *grp_cfs_rq = tg->cfs_rq[i];
+ struct cfs_rq *grp_cfs_rq = tg_cfs_rq(tg, i);
bool was_idle = cfs_rq_is_idle(grp_cfs_rq);
long idle_task_delta;
struct rq_flags rf;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 08e17746ea01..a04d20fc9ff2 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -438,7 +438,7 @@ struct task_group {
#ifdef CONFIG_FAIR_GROUP_SCHED
/* runqueue "owned" by this group on each CPU */
- struct cfs_rq **cfs_rq;
+ struct cfs_rq __percpu *cfs_rq;
unsigned long shares;
#ifdef CONFIG_SMP
/*
@@ -1573,6 +1573,11 @@ static inline struct task_struct *task_of(struct sched_entity *se)
WARN_ON_ONCE(!entity_is_task(se));
return container_of(se, struct task_struct, se);
}
+/* Access a specific CPU's cfs_rq from a task group */
+static inline struct cfs_rq *tg_cfs_rq(struct task_group *tg, int cpu)
+{
+ return per_cpu_ptr(tg->cfs_rq, cpu);
+}
static inline struct sched_entity *tg_se(struct task_group *tg, int cpu)
{
@@ -1580,7 +1585,7 @@ static inline struct sched_entity *tg_se(struct task_group *tg, int cpu)
return NULL;
struct cfs_rq_with_se *combined =
- container_of(tg->cfs_rq[cpu], struct cfs_rq_with_se, cfs_rq);
+ container_of(tg_cfs_rq(tg, cpu), struct cfs_rq_with_se, cfs_rq);
return &combined->se;
}
@@ -2166,8 +2171,8 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
- set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
- p->se.cfs_rq = tg->cfs_rq[cpu];
+ set_task_rq_fair(&p->se, p->se.cfs_rq, tg_cfs_rq(tg, cpu));
+ p->se.cfs_rq = tg_cfs_rq(tg, cpu);
p->se.parent = tg_se(tg, cpu);
p->se.depth = p->se.parent ? p->se.parent->depth + 1 : 0;
#endif
--
2.50.0.rc0.604.gd4ff7b7c86-goog