Message-ID: <20240623015057.3383223-4-tj@kernel.org>
Date: Sat, 22 Jun 2024 15:50:22 -1000
From: Tejun Heo <tj@...nel.org>
To: torvalds@...ux-foundation.org
Cc: void@...ifault.com,
mingo@...hat.com,
peterz@...radead.org,
tglx@...utronix.de,
linux-kernel@...r.kernel.org,
kernel-team@...a.com,
Tejun Heo <tj@...nel.org>
Subject: [PATCH 3/3] sched, sched_ext: Move some declarations from kernel/sched/ext.h to sched.h
While sched_ext was out of tree, everything sched_ext-specific that could be
put in kernel/sched/ext.h was put there to ease forward porting. However,
kernel/sched/sched.h is the better location for some of them. Relocate.
- struct sched_enq_and_set_ctx, sched_deq_and_put_task() and
sched_enq_and_set_task() (see the usage sketch below).
- scx_enabled() and scx_switched_all().
- for_active_class_range() and for_each_active_class(). sched_class
declarations are moved above the class iterators for this.
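For illustration, a minimal sketch of how a pick loop might walk the active
classes; the wrapper function and the ->pick_next_task() call shown here are
assumptions for illustration only, not part of this patch:

/*
 * Illustrative only: iterate the active classes from highest to lowest
 * priority and return the first picked task.  With scx_switched_all(),
 * fair_sched_class is skipped; with SCX disabled, ext_sched_class is
 * skipped.  The wrapper name and callback use are assumptions.
 */
static struct task_struct *pick_from_active_classes(struct rq *rq)
{
	const struct sched_class *class;
	struct task_struct *p;

	for_each_active_class(class) {
		p = class->pick_next_task(rq);
		if (p)
			return p;
	}

	return NULL;
}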
No functional changes intended.
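As noted in the first bullet, sched_deq_and_put_task() and
sched_enq_and_set_task() bracket updates to a task's scheduling state; the
SCX enable/disable paths use them to move tasks between classes. A minimal
usage sketch, assuming DEQUEUE_SAVE | DEQUEUE_MOVE flags and that the task's
rq lock is held (both assumptions, not shown by this patch):

	struct sched_enq_and_set_ctx ctx;

	/*
	 * Illustrative only: dequeue @p, change its class-related state,
	 * then re-enqueue it with its previous queued/running state
	 * restored.  Flags and locking context are assumptions.
	 */
	lockdep_assert_rq_held(task_rq(p));

	sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
	/* ... switch p->sched_class or related per-task state here ... */
	sched_enq_and_set_task(&ctx);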
Signed-off-by: Tejun Heo <tj@...nel.org>
Suggested-by: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: David Vernet <void@...ifault.com>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
---
kernel/sched/ext.h | 39 --------------------------
kernel/sched/sched.h | 65 ++++++++++++++++++++++++++++++++++++++++----
2 files changed, 59 insertions(+), 45 deletions(-)
diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h
index 1d7837bdfaba..32d3a51f591a 100644
--- a/kernel/sched/ext.h
+++ b/kernel/sched/ext.h
@@ -8,24 +8,6 @@
*/
#ifdef CONFIG_SCHED_CLASS_EXT
-struct sched_enq_and_set_ctx {
- struct task_struct *p;
- int queue_flags;
- bool queued;
- bool running;
-};
-
-void sched_deq_and_put_task(struct task_struct *p, int queue_flags,
- struct sched_enq_and_set_ctx *ctx);
-void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx);
-
-extern const struct sched_class ext_sched_class;
-
-DECLARE_STATIC_KEY_FALSE(__scx_ops_enabled);
-DECLARE_STATIC_KEY_FALSE(__scx_switched_all);
-#define scx_enabled() static_branch_unlikely(&__scx_ops_enabled)
-#define scx_switched_all() static_branch_unlikely(&__scx_switched_all)
-
void scx_tick(struct rq *rq);
void init_scx_entity(struct sched_ext_entity *scx);
void scx_pre_fork(struct task_struct *p);
@@ -52,22 +34,6 @@ static inline bool task_on_scx(const struct task_struct *p)
return scx_enabled() && p->sched_class == &ext_sched_class;
}
-static inline const struct sched_class *next_active_class(const struct sched_class *class)
-{
- class++;
- if (scx_switched_all() && class == &fair_sched_class)
- class++;
- if (!scx_enabled() && class == &ext_sched_class)
- class++;
- return class;
-}
-
-#define for_active_class_range(class, _from, _to) \
- for (class = (_from); class != (_to); class = next_active_class(class))
-
-#define for_each_active_class(class) \
- for_active_class_range(class, __sched_class_highest, __sched_class_lowest)
-
#ifdef CONFIG_SCHED_CORE
bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
bool in_fi);
@@ -75,9 +41,6 @@ bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
#else /* CONFIG_SCHED_CLASS_EXT */
-#define scx_enabled() false
-#define scx_switched_all() false
-
static inline void scx_tick(struct rq *rq) {}
static inline void scx_pre_fork(struct task_struct *p) {}
static inline int scx_fork(struct task_struct *p) { return 0; }
@@ -91,8 +54,6 @@ static inline int scx_check_setscheduler(struct task_struct *p, int policy) { re
static inline bool task_on_scx(const struct task_struct *p) { return false; }
static inline void init_sched_ext_class(void) {}
-#define for_each_active_class for_each_class
-
#endif /* CONFIG_SCHED_CLASS_EXT */
#if defined(CONFIG_SCHED_CLASS_EXT) && defined(CONFIG_SMP)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 963a2fa180ad..2a433f760813 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2438,19 +2438,54 @@ const struct sched_class name##_sched_class \
extern struct sched_class __sched_class_highest[];
extern struct sched_class __sched_class_lowest[];
+extern const struct sched_class stop_sched_class;
+extern const struct sched_class dl_sched_class;
+extern const struct sched_class rt_sched_class;
+extern const struct sched_class fair_sched_class;
+extern const struct sched_class idle_sched_class;
+
+#ifdef CONFIG_SCHED_CLASS_EXT
+extern const struct sched_class ext_sched_class;
+
+DECLARE_STATIC_KEY_FALSE(__scx_ops_enabled); /* SCX BPF scheduler loaded */
+DECLARE_STATIC_KEY_FALSE(__scx_switched_all); /* all fair class tasks on SCX */
+
+#define scx_enabled() static_branch_unlikely(&__scx_ops_enabled)
+#define scx_switched_all() static_branch_unlikely(&__scx_switched_all)
+#else /* !CONFIG_SCHED_CLASS_EXT */
+#define scx_enabled() false
+#define scx_switched_all() false
+#endif /* !CONFIG_SCHED_CLASS_EXT */
+
+/*
+ * Iterate only active classes. SCX can take over all fair tasks or be
+ * completely disabled. If the former, skip fair. If the latter, skip SCX.
+ */
+static inline const struct sched_class *next_active_class(const struct sched_class *class)
+{
+ class++;
+#ifdef CONFIG_SCHED_CLASS_EXT
+ if (scx_switched_all() && class == &fair_sched_class)
+ class++;
+ if (!scx_enabled() && class == &ext_sched_class)
+ class++;
+#endif
+ return class;
+}
+
#define for_class_range(class, _from, _to) \
for (class = (_from); class < (_to); class++)
#define for_each_class(class) \
for_class_range(class, __sched_class_highest, __sched_class_lowest)
-#define sched_class_above(_a, _b) ((_a) < (_b))
+#define for_active_class_range(class, _from, _to) \
+ for (class = (_from); class != (_to); class = next_active_class(class))
-extern const struct sched_class stop_sched_class;
-extern const struct sched_class dl_sched_class;
-extern const struct sched_class rt_sched_class;
-extern const struct sched_class fair_sched_class;
-extern const struct sched_class idle_sched_class;
+#define for_each_active_class(class) \
+ for_active_class_range(class, __sched_class_highest, __sched_class_lowest)
+
+#define sched_class_above(_a, _b) ((_a) < (_b))
static inline bool sched_stop_runnable(struct rq *rq)
{
@@ -3698,6 +3733,24 @@ static inline void balance_callbacks(struct rq *rq, struct balance_callback *hea
#endif
+#ifdef CONFIG_SCHED_CLASS_EXT
+/*
+ * Used by SCX in the enable/disable paths to move tasks between sched_classes
+ * and establish invariants.
+ */
+struct sched_enq_and_set_ctx {
+ struct task_struct *p;
+ int queue_flags;
+ bool queued;
+ bool running;
+};
+
+void sched_deq_and_put_task(struct task_struct *p, int queue_flags,
+ struct sched_enq_and_set_ctx *ctx);
+void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx);
+
+#endif /* CONFIG_SCHED_CLASS_EXT */
+
#include "ext.h"
#endif /* _KERNEL_SCHED_SCHED_H */
--
2.45.2