Message-Id: <20230317213333.2174969-9-tj@kernel.org>
Date: Fri, 17 Mar 2023 11:33:09 -1000
From: Tejun Heo <tj@...nel.org>
To: torvalds@...ux-foundation.org, mingo@...hat.com,
peterz@...radead.org, juri.lelli@...hat.com,
vincent.guittot@...aro.org, dietmar.eggemann@....com,
rostedt@...dmis.org, bsegall@...gle.com, mgorman@...e.de,
bristot@...hat.com, vschneid@...hat.com, ast@...nel.org,
daniel@...earbox.net, andrii@...nel.org, martin.lau@...nel.org,
joshdon@...gle.com, brho@...gle.com, pjt@...gle.com,
derkling@...gle.com, haoluo@...gle.com, dvernet@...a.com,
dschatzberg@...a.com, dskarlat@...cmu.edu, riel@...riel.com
Cc: linux-kernel@...r.kernel.org, bpf@...r.kernel.org,
kernel-team@...a.com, Tejun Heo <tj@...nel.org>,
kernel test robot <lkp@...el.com>
Subject: [PATCH 08/32] sched: Expose css_tg(), __setscheduler_prio() and SCHED_CHANGE_BLOCK()

These will be used by a new BPF extensible sched_class.

css_tg() will be used in the init and exit paths to visit all task_groups by
walking cgroups.
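
For illustration only, such a walk could look roughly like the sketch below.
It uses the existing css_for_each_descendant_pre() iterator under RCU; the
concrete init/exit paths are added by later patches and may differ:

	struct cgroup_subsys_state *css;

	rcu_read_lock();
	css_for_each_descendant_pre(css, &root_task_group.css) {
		struct task_group *tg = css_tg(css);

		/* visit @tg here, e.g. set up per-task_group state */
	}
	rcu_read_unlock();
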
__setscheduler_prio() is used to pick the sched_class matching the current
prio of the task. For the new BPF extensible sched_class, the mapping from
the task configuration to sched_class isn't static and depends on a few
factors - e.g. whether the BPF progs implementing the scheduler are loaded
and in a serviceable state. That mapping logic will be added to
__setscheduler_prio().
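
For reference (the corresponding hunk in the diff below shows only the first
branch), the current body maps prio to sched_class statically, roughly:

	void __setscheduler_prio(struct task_struct *p, int prio)
	{
		if (dl_prio(prio))
			p->sched_class = &dl_sched_class;
		else if (rt_prio(prio))
			p->sched_class = &rt_sched_class;
		else
			p->sched_class = &fair_sched_class;

		/* a later patch would branch to the BPF extensible class here */
		p->prio = prio;
	}
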
When the BPF scheduler progs get loaded and unloaded, the mapping changes and
the new sched_class will walk the tasks, applying the new mapping using
SCHED_CHANGE_BLOCK() and __setscheduler_prio().
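
As a rough sketch of that re-mapping for a single task (assuming the usual
task_rq_lock() rules; the loader-side details land in later patches):

	const struct sched_class *prev_class = p->sched_class;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);
	update_rq_clock(rq);	/* required before a NOCLOCK change block */
	SCHED_CHANGE_BLOCK(rq, p, DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK) {
		/* dequeued with the old class; switch and re-enqueue with the new */
		__setscheduler_prio(p, p->prio);
		check_class_changing(rq, p, prev_class);
	}
	check_class_changed(rq, p, prev_class, p->prio);
	task_rq_unlock(rq, p, &rf);
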
v2: Expose SCHED_CHANGE_BLOCK() too and update the description.

Signed-off-by: Tejun Heo <tj@...nel.org>
Reviewed-by: David Vernet <dvernet@...a.com>
Acked-by: Josh Don <joshdon@...gle.com>
Acked-by: Hao Luo <haoluo@...gle.com>
Acked-by: Barret Rhoden <brho@...gle.com>
Reported-by: kernel test robot <lkp@...el.com>
---
kernel/sched/core.c | 47 +++----------------------------------------
kernel/sched/sched.h | 48 ++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 51 insertions(+), 44 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fc7008095249..2a602f93f5f8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2096,15 +2096,7 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
dequeue_task(rq, p, flags);
}
-struct sched_change_guard {
- struct task_struct *p;
- struct rq *rq;
- bool queued;
- bool running;
- bool done;
-};
-
-static struct sched_change_guard
+struct sched_change_guard
sched_change_guard_init(struct rq *rq, struct task_struct *p, int flags)
{
struct sched_change_guard cg = {
@@ -2129,7 +2121,7 @@ sched_change_guard_init(struct rq *rq, struct task_struct *p, int flags)
return cg;
}
-static void sched_change_guard_fini(struct sched_change_guard *cg, int flags)
+void sched_change_guard_fini(struct sched_change_guard *cg, int flags)
{
if (cg->queued)
enqueue_task(cg->rq, cg->p, flags | ENQUEUE_NOCLOCK);
@@ -2138,34 +2130,6 @@ static void sched_change_guard_fini(struct sched_change_guard *cg, int flags)
cg->done = true;
}
-/**
- * SCHED_CHANGE_BLOCK - Nested block for task attribute updates
- * @__rq: Runqueue the target task belongs to
- * @__p: Target task
- * @__flags: DEQUEUE/ENQUEUE_* flags
- *
- * A task may need to be dequeued and put_prev_task'd for attribute updates and
- * set_next_task'd and re-enqueued afterwards. This helper defines a nested
- * block which automatically handles these preparation and cleanup operations.
- *
- * SCHED_CHANGE_BLOCK(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK) {
- * update_attribute(p);
- * ...
- * }
- *
- * If @__flags is a variable, the variable may be updated in the block body and
- * the updated value will be used when re-enqueueing @p.
- *
- * If %DEQUEUE_NOCLOCK is specified, the caller is responsible for calling
- * update_rq_clock() beforehand. Otherwise, the rq clock is automatically
- * updated iff the task needs to be dequeued and re-enqueued. Only the former
- * case guarantees that the rq clock is up-to-date inside and after the block.
- */
-#define SCHED_CHANGE_BLOCK(__rq, __p, __flags) \
- for (struct sched_change_guard __cg = \
- sched_change_guard_init(__rq, __p, __flags); \
- !__cg.done; sched_change_guard_fini(&__cg, __flags))
-
static inline int __normal_prio(int policy, int rt_prio, int nice)
{
int prio;
@@ -7016,7 +6980,7 @@ int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flag
}
EXPORT_SYMBOL(default_wake_function);
-static void __setscheduler_prio(struct task_struct *p, int prio)
+void __setscheduler_prio(struct task_struct *p, int prio)
{
if (dl_prio(prio))
p->sched_class = &dl_sched_class;
@@ -10413,11 +10377,6 @@ void sched_move_task(struct task_struct *tsk)
task_rq_unlock(rq, tsk, &rf);
}
-static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
-{
- return css ? container_of(css, struct task_group, css) : NULL;
-}
-
static struct cgroup_subsys_state *
cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 9a6cba6f9299..866ce69a445e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -469,6 +469,11 @@ static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
return walk_tg_tree_from(&root_task_group, down, up, data);
}
+static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
+{
+ return css ? container_of(css, struct task_group, css) : NULL;
+}
+
extern int tg_nop(struct task_group *tg, void *data);
extern void free_fair_sched_group(struct task_group *tg);
@@ -2386,6 +2391,8 @@ extern void init_sched_dl_class(void);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);
+extern void __setscheduler_prio(struct task_struct *p, int prio);
+
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);
@@ -2466,6 +2473,47 @@ static inline void sub_nr_running(struct rq *rq, unsigned count)
extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
+struct sched_change_guard {
+ struct task_struct *p;
+ struct rq *rq;
+ bool queued;
+ bool running;
+ bool done;
+};
+
+extern struct sched_change_guard
+sched_change_guard_init(struct rq *rq, struct task_struct *p, int flags);
+
+extern void sched_change_guard_fini(struct sched_change_guard *cg, int flags);
+
+/**
+ * SCHED_CHANGE_BLOCK - Nested block for task attribute updates
+ * @__rq: Runqueue the target task belongs to
+ * @__p: Target task
+ * @__flags: DEQUEUE/ENQUEUE_* flags
+ *
+ * A task may need to be dequeued and put_prev_task'd for attribute updates and
+ * set_next_task'd and re-enqueued afterwards. This helper defines a nested
+ * block which automatically handles these preparation and cleanup operations.
+ *
+ * SCHED_CHANGE_BLOCK(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK) {
+ * update_attribute(p);
+ * ...
+ * }
+ *
+ * If @__flags is a variable, the variable may be updated in the block body and
+ * the updated value will be used when re-enqueueing @p.
+ *
+ * If %DEQUEUE_NOCLOCK is specified, the caller is responsible for calling
+ * update_rq_clock() beforehand. Otherwise, the rq clock is automatically
+ * updated iff the task needs to be dequeued and re-enqueued. Only the former
+ * case guarantees that the rq clock is up-to-date inside and after the block.
+ */
+#define SCHED_CHANGE_BLOCK(__rq, __p, __flags) \
+ for (struct sched_change_guard __cg = \
+ sched_change_guard_init(__rq, __p, __flags); \
+ !__cg.done; sched_change_guard_fini(&__cg, __flags))
+
extern void check_class_changing(struct rq *rq, struct task_struct *p,
const struct sched_class *prev_class);
extern void check_class_changed(struct rq *rq, struct task_struct *p,
--
2.39.2