Message-ID: <20250819123214.GH4067720@noisy.programming.kicks-ass.net>
Date: Tue, 19 Aug 2025 14:32:14 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: Menglong Dong <menglong8.dong@...il.com>
Cc: mingo@...hat.com, juri.lelli@...hat.com, vincent.guittot@...aro.org,
dietmar.eggemann@....com, rostedt@...dmis.org, bsegall@...gle.com,
mgorman@...e.de, vschneid@...hat.com, ast@...nel.org,
daniel@...earbox.net, john.fastabend@...il.com, andrii@...nel.org,
martin.lau@...ux.dev, eddyz87@...il.com, song@...nel.org,
yonghong.song@...ux.dev, kpsingh@...nel.org, sdf@...ichev.me,
haoluo@...gle.com, jolsa@...nel.org, simona.vetter@...ll.ch,
tzimmermann@...e.de, jani.nikula@...el.com,
linux-kernel@...r.kernel.org, bpf@...r.kernel.org
Subject: Re: [PATCH v2 2/3] sched: make migrate_enable/migrate_disable inline

On Tue, Aug 19, 2025 at 09:58:31AM +0800, Menglong Dong wrote:
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index be00629f0ba4..00383fed9f63 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -119,6 +119,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
> EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
>
> DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
> +EXPORT_SYMBOL_GPL(runqueues);

Oh no, absolutely not.

You never, ever, export a variable, and certainly not this one.

How about something like so? I tried 'clever' things with export inline,
but the compiler hates me, so the below is the best I could make work.
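
The shape of it is the usual inline-for-the-core, out-of-line-for-everybody-else
pattern; roughly, with made-up names and none of the kernel details, something
like this (a userspace sketch, not the actual code):

/* foo.h -- hypothetical names, same shape as the sched.h hunk below */
#ifndef FOO_H
#define FOO_H

#ifndef MODULE

static inline void _foo(void)
{
	/* the real work; visible here so the core can inline it */
}

#ifndef CREATE_FOO
static inline void foo(void)		/* core kernel gets the inline */
{
	_foo();
}
#else
extern void foo(void);			/* the defining TU gets a declaration */
#endif

#else /* MODULE */
extern void foo(void);			/* modules get the out-of-line call */
#endif

#endif /* FOO_H */

/* foo.c -- the one translation unit that emits the real symbol */
#define CREATE_FOO
#include "foo.h"

void foo(void)
{
	_foo();
}

The core keeps inlining foo(); modules (and the one TU that defines CREATE_FOO)
end up calling the single real foo().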
---
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2315,6 +2315,7 @@ static __always_inline void alloc_tag_re
#define alloc_tag_restore(_tag, _old) do {} while (0)
#endif
+#ifndef MODULE
#ifndef COMPILE_OFFSETS
extern void __migrate_enable(void);
@@ -2328,7 +2329,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq
#define this_rq_raw() PERCPU_PTR(&runqueues)
#endif
-static inline void migrate_enable(void)
+static inline void _migrate_enable(void)
{
struct task_struct *p = current;
@@ -2363,7 +2364,7 @@ static inline void migrate_enable(void)
(*(unsigned int *)((void *)this_rq_raw() + RQ_nr_pinned))--;
}
-static inline void migrate_disable(void)
+static inline void _migrate_disable(void)
{
struct task_struct *p = current;
@@ -2382,10 +2383,30 @@ static inline void migrate_disable(void)
(*(unsigned int *)((void *)this_rq_raw() + RQ_nr_pinned))++;
p->migration_disabled = 1;
}
-#else
-static inline void migrate_disable(void) { }
-static inline void migrate_enable(void) { }
-#endif
+#else /* !COMPILE_OFFSETS */
+static inline void _migrate_disable(void) { }
+static inline void _migrate_enable(void) { }
+#endif /* !COMPILE_OFFSETS */
+
+#ifndef CREATE_MIGRATE_DISABLE
+static inline void migrate_disable(void)
+{
+ _migrate_disable();
+}
+
+static inline void migrate_enable(void)
+{
+ _migrate_enable();
+}
+#else /* CREATE_MIGRATE_DISABLE */
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+#endif /* CREATE_MIGRATE_DISABLE */
+
+#else /* !MODULE */
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+#endif /* !MODULE */
DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7,6 +7,9 @@
* Copyright (C) 1991-2002 Linus Torvalds
* Copyright (C) 1998-2024 Ingo Molnar, Red Hat
*/
+#define CREATE_MIGRATE_DISABLE
+#include <linux/sched.h>
+
#include <linux/highmem.h>
#include <linux/hrtimer_api.h>
#include <linux/ktime_api.h>
@@ -119,7 +122,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_updat
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-EXPORT_SYMBOL_GPL(runqueues);
#ifdef CONFIG_SCHED_PROXY_EXEC
DEFINE_STATIC_KEY_TRUE(__sched_proxy_exec);
@@ -2382,6 +2384,11 @@ static void migrate_disable_switch(struc
__do_set_cpus_allowed(p, &ac);
}
+void migrate_disable(void)
+{
+ _migrate_disable();
+}
+
void __migrate_enable(void)
{
struct task_struct *p = current;
@@ -2392,7 +2399,11 @@ void __migrate_enable(void)
__set_cpus_allowed_ptr(p, &ac);
}
-EXPORT_SYMBOL_GPL(__migrate_enable);
+
+void migrate_enable(void)
+{
+ _migrate_enable();
+}
static inline bool rq_has_pinned_tasks(struct rq *rq)
{
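
On the module side it then just becomes an ordinary call; a sketch, assuming
migrate_disable()/migrate_enable() also get EXPORT_SYMBOL_GPL() (which the
hunks above don't add yet):

/* demo.c -- hypothetical module-side user, not part of the patch */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>

static int __init demo_init(void)
{
	migrate_disable();	/* plain out-of-line call into the core kernel */
	pr_info("demo: running on CPU%d\n", smp_processor_id());
	migrate_enable();
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_DESCRIPTION("migrate_disable() demo");
MODULE_LICENSE("GPL");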