Message-ID: <161288550623.23325.15273780016472163.tip-bot2@tip-bot2>
Date: Tue, 09 Feb 2021 15:45:06 -0000
From: "tip-bot2 for Peter Zijlstra" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: "Peter Zijlstra (Intel)" <peterz@...radead.org>, x86@...nel.org,
linux-kernel@...r.kernel.org
Subject: [tip: sched/core] sched: Harden PREEMPT_DYNAMIC

The following commit has been merged into the sched/core branch of tip:

Commit-ID: 355b3a57ddba71b73a99aa249a99aed6ed904606
Gitweb: https://git.kernel.org/tip/355b3a57ddba71b73a99aa249a99aed6ed904606
Author: Peter Zijlstra <peterz@...radead.org>
AuthorDate: Mon, 25 Jan 2021 16:26:50 +01:00
Committer: Peter Zijlstra <peterz@...radead.org>
CommitterDate: Tue, 09 Feb 2021 16:31:03 +01:00

sched: Harden PREEMPT_DYNAMIC

Use the new EXPORT_STATIC_CALL_TRAMP() / static_call_mod() to unexport
the static_call_key for the PREEMPT_DYNAMIC calls such that modules
can no longer update these calls.

Having modules change/hijack the preemption calls would be horrible.
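
For readers new to the static_call() API, a rough usage-level sketch of
what the two export flavours mean. The names my_call/my_func/my_user
below are made up for illustration; the authoritative definitions live in
include/linux/static_call.h and include/linux/static_call_types.h:

	/* Hypothetical example, not part of this patch. */
	DEFINE_STATIC_CALL(my_call, my_func);

	/*
	 * EXPORT_STATIC_CALL(my_call) would export both the trampoline and
	 * the static_call_key, so any module could retarget the call with
	 * static_call_update(my_call, some_other_func).
	 *
	 * Exporting only the trampoline keeps the key private to the core
	 * kernel: modules can still call through the trampoline, but a
	 * static_call_update() from a module no longer resolves, because
	 * STATIC_CALL_KEY(my_call) is not an exported symbol.
	 */
	EXPORT_STATIC_CALL_TRAMP(my_call);

	/*
	 * Call sites in code that can be built either into the kernel or
	 * as a module use static_call_mod(); compiled into a module this
	 * is a plain call to the exported trampoline and never references
	 * the unexported key.
	 */
	static int my_user(int arg)
	{
		return static_call_mod(my_call)(arg);
	}
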
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
 arch/x86/include/asm/preempt.h | 4 ++--
 include/linux/kernel.h         | 2 +-
 include/linux/sched.h          | 2 +-
 kernel/sched/core.c            | 8 ++++----
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 9b12dce..0aa96f8 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -114,7 +114,7 @@ DECLARE_STATIC_CALL(preempt_schedule, __preempt_schedule_func);
#define __preempt_schedule() \
do { \
- __ADDRESSABLE(STATIC_CALL_KEY(preempt_schedule)); \
+ __STATIC_CALL_MOD_ADDRESSABLE(preempt_schedule); \
asm volatile ("call " STATIC_CALL_TRAMP_STR(preempt_schedule) : ASM_CALL_CONSTRAINT); \
} while (0)
@@ -127,7 +127,7 @@ DECLARE_STATIC_CALL(preempt_schedule_notrace, __preempt_schedule_notrace_func);
#define __preempt_schedule_notrace() \
do { \
- __ADDRESSABLE(STATIC_CALL_KEY(preempt_schedule_notrace)); \
+ __STATIC_CALL_MOD_ADDRESSABLE(preempt_schedule_notrace); \
asm volatile ("call " STATIC_CALL_TRAMP_STR(preempt_schedule_notrace) : ASM_CALL_CONSTRAINT); \
} while (0)
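
The x86 hunks above only change which symbol the call site keeps a
reference to; the emitted instruction is still a direct call to the
trampoline named by STATIC_CALL_TRAMP_STR(). A plausible shape of the new
helper, from the companion static_call patch in this series (see
include/linux/static_call_types.h for the exact definition):

	#ifdef MODULE
	/* A module keeps no reference to the static_call_key at all. */
	#define __STATIC_CALL_MOD_ADDRESSABLE(name)
	#else
	/*
	 * Built-in code still records the key, so the core static_call
	 * machinery can associate and patch this call site as before.
	 */
	#define __STATIC_CALL_MOD_ADDRESSABLE(name) __STATIC_CALL_ADDRESSABLE(name)
	#endif

That is the whole trick: the key reference, which is what a module would
need in order to retarget the call, simply disappears when the header is
compiled for a module.
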
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index cfd3d34..5b7ed6d 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -93,7 +93,7 @@ DECLARE_STATIC_CALL(might_resched, __cond_resched);
static __always_inline void might_resched(void)
{
- static_call(might_resched)();
+ static_call_mod(might_resched)();
}
#else
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2f35594..4d56828 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1880,7 +1880,7 @@ DECLARE_STATIC_CALL(cond_resched, __cond_resched);
static __always_inline int _cond_resched(void)
{
- return static_call(cond_resched)();
+ return static_call_mod(cond_resched)();
}
#else
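
The kernel.h and sched.h hunks are the call-site half of the same idea:
might_resched() and _cond_resched() are inlined into every module through
these headers, so they switch to static_call_mod(). For built-in code this
behaves exactly like static_call(); compiled into a module it becomes a
plain call to the exported trampoline. Roughly, for a hypothetical module
helper (function name invented):

	static int my_mod_helper(void)
	{
		might_sleep();		/* may end up as: call __SCT__might_resched */
		return cond_resched();	/* roughly: call __SCT__cond_resched */
	}

The trampolines' targets are still selected by the core kernel, e.g. via
the preempt= boot option, so modules keep the dynamic behaviour; they just
can no longer change it.
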
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index cb226f7..d93d02b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5253,7 +5253,7 @@ EXPORT_SYMBOL(preempt_schedule);
#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_CALL(preempt_schedule, __preempt_schedule_func);
-EXPORT_STATIC_CALL(preempt_schedule);
+EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
#endif
@@ -5311,7 +5311,7 @@ EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_CALL(preempt_schedule_notrace, __preempt_schedule_notrace_func);
-EXPORT_STATIC_CALL(preempt_schedule_notrace);
+EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
#endif
#endif /* CONFIG_PREEMPTION */
@@ -6983,10 +6983,10 @@ EXPORT_SYMBOL(__cond_resched);
#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
-EXPORT_STATIC_CALL(cond_resched);
+EXPORT_STATIC_CALL_TRAMP(cond_resched);
DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
-EXPORT_STATIC_CALL(might_resched);
+EXPORT_STATIC_CALL_TRAMP(might_resched);
#endif
/*
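
The net effect, seen from the module side: a hypothetical out-of-tree
module that tried to hijack the preemption calls (precisely what the
changelog wants to rule out) now fails with an undefined reference to the
static_call_key, while ordinary module users of preempt_enable() and
cond_resched() keep working through the exported trampolines. Illustrative
only; the module and function names below are invented:

	#include <linux/module.h>
	#include <linux/preempt.h>
	#include <linux/static_call.h>

	static void my_preempt_schedule(void)
	{
		/* pretend to handle preemption differently */
	}

	static int __init hijack_init(void)
	{
		/*
		 * This references STATIC_CALL_KEY(preempt_schedule), which
		 * EXPORT_STATIC_CALL_TRAMP() deliberately does not export,
		 * so the module no longer builds/loads.
		 */
		static_call_update(preempt_schedule, my_preempt_schedule);
		return 0;
	}
	module_init(hijack_init);
	MODULE_LICENSE("GPL");
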