Message-ID: <aEyzhBAl5zkP6Ku-@slm.duckdns.org>
Date: Fri, 13 Jun 2025 13:25:56 -1000
From: Tejun Heo <tj@...nel.org>
To: David Vernet <void@...ifault.com>, Andrea Righi <arighi@...dia.com>,
Changwoo Min <changwoo@...lia.com>
Cc: linux-kernel@...r.kernel.org, Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>
Subject: [PATCH 2/2 sched_ext/for-6.16-fixes] sched_ext, sched/core: Don't
call scx_group_set_weight() prematurely from sched_create_group()

During task_group creation, sched_create_group() calls
scx_group_set_weight() with CGROUP_WEIGHT_DFL to initialize the sched_ext
portion. This is premature and ends up calling ops.cgroup_set_weight() with
an incorrect @cgrp before ops.cgroup_init() is called.

sched_create_group() should just initialize SCX-related fields in the new
task_group. Fix it by factoring out scx_tg_init() from sched_init() and
making sched_create_group() call that function instead of
scx_group_set_weight().
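
(Illustration, not part of the patch: a minimal userspace sketch of the
call-ordering problem described above. The struct, the ops_cgroup_*()
helpers, and the cgroup_initialized flag are simplified stand-ins rather
than the kernel's actual types and callbacks; only the sequence of calls
mirrors the bug and the fix.)

#include <stdio.h>
#include <stdbool.h>

#define CGROUP_WEIGHT_DFL 100

struct task_group {
	unsigned long scx_weight;
	bool cgroup_initialized;	/* set by the cgroup_init stand-in */
};

/* stand-in for ops.cgroup_init(), normally run from scx_tg_online() */
static void ops_cgroup_init(struct task_group *tg)
{
	tg->cgroup_initialized = true;
}

/* stand-in for ops.cgroup_set_weight() */
static void ops_cgroup_set_weight(struct task_group *tg, unsigned long w)
{
	if (!tg->cgroup_initialized)
		printf("bug: weight callback before cgroup_init\n");
	tg->scx_weight = w;
}

int main(void)
{
	struct task_group tg = { 0 };

	/* before the fix: group creation invoked the ops callback ... */
	ops_cgroup_set_weight(&tg, CGROUP_WEIGHT_DFL);	/* premature */
	/* ... while ops.cgroup_init() only ran later, from css_online */
	ops_cgroup_init(&tg);

	/* after the fix: creation merely seeds the field, no callback */
	tg.scx_weight = CGROUP_WEIGHT_DFL;	/* what scx_tg_init() does */
	return 0;
}
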
Signed-off-by: Tejun Heo <tj@...nel.org>
Fixes: 819513666966 ("sched_ext: Add cgroup support")
Cc: stable@...r.kernel.org # v6.12+
---
Ingo, Peter, while this touches kernel/sched/core.c, the changes are trivial
and only affect sched_ext. I'm planning to route it through
sched_ext/for-6.16-fixes. Please holler if you want it routed through tip.

Thanks.

 kernel/sched/core.c | 9 ++++-----
 kernel/sched/ext.c  | 5 +++++
 kernel/sched/ext.h  | 2 ++
 3 files changed, 11 insertions(+), 5 deletions(-)

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8544,17 +8544,16 @@ void __init sched_init(void)
 		root_task_group.shares = ROOT_TASK_GROUP_LOAD;
 		init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
 #endif /* CONFIG_FAIR_GROUP_SCHED */
-#ifdef CONFIG_EXT_GROUP_SCHED
-		root_task_group.scx_weight = CGROUP_WEIGHT_DFL;
-#endif /* CONFIG_EXT_GROUP_SCHED */
+
 #ifdef CONFIG_RT_GROUP_SCHED
 		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
 
 		root_task_group.rt_rq = (struct rt_rq **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
-
 #endif /* CONFIG_RT_GROUP_SCHED */
+
+		scx_tg_init(&root_task_group);
 	}
 
 #ifdef CONFIG_SMP
@@ -8985,7 +8984,7 @@ struct task_group *sched_create_group(st
 	if (!alloc_rt_sched_group(tg, parent))
 		goto err;
 
-	scx_group_set_weight(tg, CGROUP_WEIGHT_DFL);
+	scx_tg_init(tg);
 	alloc_uclamp_sched_group(tg, parent);
 
 	return tg;
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -4092,6 +4092,11 @@ bool scx_can_stop_tick(struct rq *rq)
 DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_rwsem);
 static bool scx_cgroup_enabled;
 
+void scx_tg_init(struct task_group *tg)
+{
+	tg->scx_weight = CGROUP_WEIGHT_DFL;
+}
+
 int scx_tg_online(struct task_group *tg)
 {
 	struct scx_sched *sch = scx_root;
--- a/kernel/sched/ext.h
+++ b/kernel/sched/ext.h
@@ -79,6 +79,7 @@ static inline void scx_update_idle(struc
 
 #ifdef CONFIG_CGROUP_SCHED
 #ifdef CONFIG_EXT_GROUP_SCHED
+void scx_tg_init(struct task_group *tg);
 int scx_tg_online(struct task_group *tg);
 void scx_tg_offline(struct task_group *tg);
 int scx_cgroup_can_attach(struct cgroup_taskset *tset);
@@ -88,6 +89,7 @@ void scx_cgroup_cancel_attach(struct cgr
 void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight);
 void scx_group_set_idle(struct task_group *tg, bool idle);
 #else /* CONFIG_EXT_GROUP_SCHED */
+static inline void scx_tg_init(struct task_group *tg) {}
 static inline int scx_tg_online(struct task_group *tg) { return 0; }
 static inline void scx_tg_offline(struct task_group *tg) {}
 static inline int scx_cgroup_can_attach(struct cgroup_taskset *tset) { return 0; }