Message-ID: <20240711130004.2157737-5-vschneid@redhat.com>
Date: Thu, 11 Jul 2024 14:59:58 +0200
From: Valentin Schneider <vschneid@...hat.com>
To: linux-kernel@...r.kernel.org,
rcu@...r.kernel.org
Cc: Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>,
Mel Gorman <mgorman@...e.de>,
Phil Auld <pauld@...hat.com>,
Clark Williams <williams@...hat.com>,
Tomas Glozar <tglozar@...hat.com>,
"Paul E. McKenney" <paulmck@...nel.org>,
Frederic Weisbecker <frederic@...nel.org>,
Neeraj Upadhyay <neeraj.upadhyay@...nel.org>,
Joel Fernandes <joel@...lfernandes.org>,
Josh Triplett <josh@...htriplett.org>,
Boqun Feng <boqun.feng@...il.com>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Lai Jiangshan <jiangshanlai@...il.com>,
Zqiang <qiang.zhang1211@...il.com>,
Alexander Gordeev <agordeev@...ux.ibm.com>,
Catalin Marinas <catalin.marinas@....com>,
Arnd Bergmann <arnd@...db.de>,
Guo Ren <guoren@...nel.org>,
Palmer Dabbelt <palmer@...osinc.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Oleg Nesterov <oleg@...hat.com>,
Jens Axboe <axboe@...nel.dk>
Subject: [RFC PATCH v3 04/10] sched/fair: Introduce sched_throttle_work
Later commits will change CFS bandwidth control throttling from a
per-cfs_rq basis to a per-task basis. Actual throttling of a task will
happen in the return-to-user path and will be implemented via a
task_work callback.
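As a rough illustration only (not part of this patch, and using a
hypothetical helper name), queueing the work so that the callback runs on
the next return to userspace could look something like:

    /* Sketch only: run throttle_cfs_rq_work() on the next return to user. */
    static void queue_throttle_work(struct task_struct *p)
    {
            /*
             * TWA_RESUME runs the callback on return to userspace;
             * task_work_add() only fails if the task is already exiting.
             */
            task_work_add(p, &p->sched_throttle_work, TWA_RESUME);
    }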
To ease reviewing, the infrastructure and helpers are added first; the
actual behaviour will be implemented when switching to per-task
throttling.
Add a task_work node to struct task_struct, and have it initialised at
sched_fork().
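The init helper added below points the work's ->next field back at the
work itself, which acts as a "not queued" marker and is what the
double-add protection mentioned in the code comment relies on. A minimal
sketch of how a queueing site could test it (hypothetical helper name,
not part of this patch):

    /* Sketch only: a self-referencing ->next means the work is not pending. */
    static bool task_throttle_work_queued(struct task_struct *p)
    {
            return p->sched_throttle_work.next != &p->sched_throttle_work;
    }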
Signed-off-by: Valentin Schneider <vschneid@...hat.com>
---
include/linux/sched.h | 1 +
kernel/sched/core.c | 4 ++++
kernel/sched/fair.c | 12 ++++++++++++
kernel/sched/sched.h | 2 ++
4 files changed, 19 insertions(+)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 90691d99027e3..a4976eb5065fc 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -812,6 +812,7 @@ struct task_struct {
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
+ struct callback_head sched_throttle_work;
#endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6d35c48239be0..b811670d2c362 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4329,6 +4329,10 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p->se.cfs_rq = NULL;
#endif
+#ifdef CONFIG_CFS_BANDWIDTH
+ init_cfs_throttle_work(p);
+#endif
+
#ifdef CONFIG_SCHEDSTATS
/* Even if schedstat is disabled, there should not be garbage */
memset(&p->stats, 0, sizeof(p->stats));
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9057584ec06de..775547cdd3ce0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5759,6 +5759,18 @@ static int tg_throttle_down(struct task_group *tg, void *data)
return 0;
}
+static void throttle_cfs_rq_work(struct callback_head *work)
+{
+
+}
+
+void init_cfs_throttle_work(struct task_struct *p)
+{
+ /* Protect against double add, see throttle_cfs_rq() and throttle_cfs_rq_work() */
+ p->sched_throttle_work.next = &p->sched_throttle_work;
+ init_task_work(&p->sched_throttle_work, throttle_cfs_rq_work);
+}
+
static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
{
struct rq *rq = rq_of(cfs_rq);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4c36cc6803617..943bca8263ffe 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2510,6 +2510,8 @@ extern void init_sched_dl_class(void);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);
+extern void init_cfs_throttle_work(struct task_struct *p);
+
extern void reweight_task(struct task_struct *p, const struct load_weight *lw);
extern void resched_curr(struct rq *rq);
--
2.43.0