Message-Id: <20201009122926.29962-5-mhocko@kernel.org>
Date: Fri, 9 Oct 2020 14:29:25 +0200
From: Michal Hocko <mhocko@...nel.org>
To: Peter Zijlstra <peterz@...radead.org>,
Thomas Gleixner <tglx@...utronix.de>
Cc: Mel Gorman <mgorman@...e.de>,
Frederic Weisbecker <fweisbecker@...e.de>,
Ingo Molnar <mingo@...hat.com>,
LKML <linux-kernel@...r.kernel.org>,
Michal Hocko <mhocko@...e.com>
Subject: [RFC PATCH v2 4/5] kernel: introduce CONFIG_PREEMPT_DYNAMIC
From: Michal Hocko <mhocko@...e.com>
Boot time preemption mode selection is currently hardcoded for
!CONFIG_PREEMPTION. Peter has suggested introducing a dedicated
option for the functionality because not every architecture
implements static branches (jump labels) efficiently, and the
additional overhead might therefore be prohibitive or undesirable.
Introduce CONFIG_PREEMPT_DYNAMIC, which allows the preemption mode
to be overridden at boot time. The functionality is currently
implemented for the PREEMPT_NONE and PREEMPT_VOLUNTARY preemption
modes.
Suggested-by: Peter Zijlstra <peterz@...radead.org>
Signed-off-by: Michal Hocko <mhocko@...e.com>
---
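Not part of the patch: the standalone C sketch below only illustrates the
selection logic being introduced. A plain boolean stands in for the
preempt_voluntary_key static key and a string stands in for the kernel
command line parameter (whose actual name is defined elsewhere in the
series); the "voluntary" value and the helpers parse_preempt_mode()/main()
are illustrative assumptions, not kernel code.

/* Standalone sketch of the boot-time preemption mode override. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the preempt_voluntary_key jump label; default PREEMPT_VOLUNTARY. */
static bool preempt_voluntary_key = true;

static int _cond_resched(void)
{
	/* In the kernel this may reschedule; here we just report the call. */
	printf("_cond_resched() called\n");
	return 1;
}

/* might_resched() only reschedules when the voluntary mode is selected. */
static void might_resched(void)
{
	if (preempt_voluntary_key)
		_cond_resched();
}

/* Rough analogue of the __setup() handler parsing the boot parameter. */
static void parse_preempt_mode(const char *str)
{
	if (!strcmp(str, "none"))
		preempt_voluntary_key = false;	/* behave like PREEMPT_NONE */
	else if (!strcmp(str, "voluntary"))
		preempt_voluntary_key = true;	/* behave like PREEMPT_VOLUNTARY */
}

int main(void)
{
	parse_preempt_mode("none");
	might_resched();	/* no reschedule: mode overridden to "none" */

	parse_preempt_mode("voluntary");
	might_resched();	/* reschedules: voluntary mode selected */
	return 0;
}

With JUMP_LABEL support the boolean check above becomes a patched
nop/jump in the real implementation, so the selected mode adds no
per-call conditional overhead.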
include/linux/kernel.h | 20 ++++++++++++++++++--
include/linux/sched.h | 12 ------------
kernel/Kconfig.preempt | 19 +++++++++++++++++++
kernel/sched/core.c | 6 +++++-
4 files changed, 42 insertions(+), 15 deletions(-)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d2d37bd5ecd5..b61ab02dba84 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -193,20 +193,36 @@ struct completion;
struct pt_regs;
struct user;
+/*
+ * cond_resched() and cond_resched_lock(): latency reduction via
+ * explicit rescheduling in places that are safe. The return
+ * value indicates whether a reschedule was done in fact.
+ * cond_resched_lock() will drop the spinlock before scheduling,
+ */
#ifndef CONFIG_PREEMPTION
+extern int _cond_resched(void);
+#else
+static inline int _cond_resched(void) { return 0; }
+#endif
+
+#ifdef CONFIG_PREEMPT_DYNAMIC
#ifdef CONFIG_PREEMPT_VOLUNTARY
DECLARE_STATIC_KEY_TRUE(preempt_voluntary_key);
#else
DECLARE_STATIC_KEY_FALSE(preempt_voluntary_key);
#endif
-extern int _cond_resched(void);
# define might_resched() \
do { if (static_branch_likely(&preempt_voluntary_key)) _cond_resched(); } while (0)
#else
+
+#ifdef CONFIG_PREEMPT_VOLUNTARY
# define might_resched() \
- do { } while (0)
+ do { _cond_resched(); } while (0)
+#else
+# define might_resched() do { } while (0)
#endif
+#endif /* CONFIG_PREEMPT_DYNAMIC */
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
extern void ___might_sleep(const char *file, int line, int preempt_offset);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index afe01e232935..184b5e162184 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1812,18 +1812,6 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
-/*
- * cond_resched() and cond_resched_lock(): latency reduction via
- * explicit rescheduling in places that are safe. The return
- * value indicates whether a reschedule was done in fact.
- * cond_resched_lock() will drop the spinlock before scheduling,
- */
-#ifndef CONFIG_PREEMPTION
-extern int _cond_resched(void);
-#else
-static inline int _cond_resched(void) { return 0; }
-#endif
-
#define cond_resched() ({ \
___might_sleep(__FILE__, __LINE__, 0); \
_cond_resched(); \
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index c460a9a2373b..e142f36dd429 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -73,6 +73,25 @@ config PREEMPT_RT
endchoice
+config PREEMPT_DYNAMIC
+ bool "Allow boot time preemption model selection"
+ depends on PREEMPT_NONE || PREEMPT_VOLUNTARY
+ help
+ This option allows the preemption model to be selected via a kernel
+ command line parameter, overriding the default preemption model
+ chosen at compile time.
+
+ The feature is primarily interesting for Linux distributions which
+ provide a pre-built kernel binary; it reduces the number of kernel
+ flavors they need to offer while still covering different use cases.
+
+ The runtime overhead is negligible with JUMP_LABEL enabled, but if
+ runtime patching of static branches is not available on the given
+ architecture then the potential overhead should be considered.
+
+ Select this if the same pre-built kernel should be used for both
+ server and desktop workloads.
+
config PREEMPT_COUNT
bool
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 07d37d862637..fe22b2fca864 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -43,6 +43,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+#ifdef CONFIG_PREEMPT_DYNAMIC
+
#ifdef CONFIG_PREEMPT_VOLUNTARY
DEFINE_STATIC_KEY_TRUE(preempt_voluntary_key);
#else
@@ -51,6 +53,8 @@ DEFINE_STATIC_KEY_FALSE(preempt_voluntary_key);
#endif
EXPORT_SYMBOL(preempt_voluntary_key);
+#endif
+
#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
/*
* Debugging: various feature bits
@@ -8491,7 +8495,7 @@ void call_trace_sched_update_nr_running(struct rq *rq, int count)
trace_sched_update_nr_running_tp(rq, count);
}
-#ifndef CONFIG_PREEMPTION
+#ifdef CONFIG_PREEMPT_DYNAMIC
static int __init setup_non_preempt_mode(char *str)
{
if (!strcmp(str, "none")) {
--
2.28.0