Message-ID: <aEFjsrkVAIskYJq3@gpd4>
Date: Thu, 5 Jun 2025 11:30:26 +0200
From: Andrea Righi <arighi@...dia.com>
To: Tejun Heo <tj@...nel.org>, David Vernet <void@...ifault.com>,
Changwoo Min <changwoo@...lia.com>
Cc: linux-kernel@...r.kernel.org
Subject: [PATCH v2 4/4] sched_ext: Make scx_locked_rq() inline

scx_locked_rq() is used from both ext.c and ext_idle.c, so move it to
ext.h as a static inline function.

No functional changes.

v2: Rename locked_rq to scx_locked_rq_state, expose it, and make
    scx_locked_rq() inline, as suggested by Tejun.

Signed-off-by: Andrea Righi <arighi@...dia.com>
---
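
Not part of the commit: a minimal sketch of the pattern this move
enables. With scx_locked_rq() visible from ext.h, a caller in
ext_idle.c can check whether the current scx callback already holds a
given CPU's rq. The helper name below is hypothetical; only
scx_locked_rq() itself comes from this patch.

/*
 * Hypothetical ext_idle.c helper (not in this patch): reuse the rq
 * already locked by the current scx callback instead of re-locking it.
 */
static struct rq *example_try_reuse_locked_rq(s32 cpu)
{
	struct rq *locked = scx_locked_rq();

	/* The current callback already holds this CPU's rq. */
	if (locked && cpu_of(locked) == cpu)
		return locked;

	/* No rq (or a different rq) is held; caller must lock cpu_rq(cpu). */
	return NULL;
}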
 kernel/sched/ext.c | 13 ++-----------
 kernel/sched/ext.h | 11 +++++++++++
 2 files changed, 13 insertions(+), 11 deletions(-)

diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 3e483138dff60..3623ba98d7d83 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1247,7 +1247,7 @@ static void scx_kf_disallow(u32 mask)
  * This allows kfuncs to safely operate on rq from any scx ops callback,
  * knowing which rq is already locked.
  */
-static DEFINE_PER_CPU(struct rq *, locked_rq);
+DEFINE_PER_CPU(struct rq *, scx_locked_rq_state);
 
 static inline void update_locked_rq(struct rq *rq)
 {
@@ -1258,16 +1258,7 @@ static inline void update_locked_rq(struct rq *rq)
 	 */
 	if (rq)
 		lockdep_assert_rq_held(rq);
-	__this_cpu_write(locked_rq, rq);
-}
-
-/*
- * Return the rq currently locked from an scx callback, or NULL if no rq is
- * locked.
- */
-static inline struct rq *scx_locked_rq(void)
-{
-	return __this_cpu_read(locked_rq);
+	__this_cpu_write(scx_locked_rq_state, rq);
 }
 
 #define SCX_CALL_OP(sch, mask, op, rq, args...)					\
diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h
index d30f2d1bc00d5..6d6d00e9de20f 100644
--- a/kernel/sched/ext.h
+++ b/kernel/sched/ext.h
@@ -20,6 +20,17 @@ static inline bool scx_rq_bypassing(struct rq *rq)
 
 DECLARE_STATIC_KEY_FALSE(scx_ops_allow_queued_wakeup);
 
+DECLARE_PER_CPU(struct rq *, scx_locked_rq_state);
+
+/*
+ * Return the rq currently locked from an scx callback, or NULL if no rq is
+ * locked.
+ */
+static inline struct rq *scx_locked_rq(void)
+{
+	return __this_cpu_read(scx_locked_rq_state);
+}
+
 void scx_tick(struct rq *rq);
 void init_scx_entity(struct sched_ext_entity *scx);
 void scx_pre_fork(struct task_struct *p);
--
2.49.0
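
For context, a simplified sketch of how the per-CPU state is used
around an ops callback: SCX_CALL_OP() in ext.c brackets the callback
with update_locked_rq(), so any kfunc the callback triggers can
retrieve the held rq through scx_locked_rq(). The function below is
illustrative only, not the actual macro body.

/*
 * Illustrative sketch (not in this patch): bracketing an scx ops
 * callback with the per-CPU locked-rq tracking.
 */
static void example_call_op(struct rq *rq)
{
	update_locked_rq(rq);	/* record the rq held across the callback */
	/* ... invoke the scx ops callback here ... */
	update_locked_rq(NULL);	/* clear the tracking once it returns */
}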