Message-Id: <1486387772-18837-17-git-send-email-mingo@kernel.org>
Date: Mon, 6 Feb 2017 14:28:19 +0100
From: Ingo Molnar <mingo@...nel.org>
To: linux-kernel@...r.kernel.org
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Linus Torvalds <torvalds@...ux-foundation.org>,
Mike Galbraith <efault@....de>,
Oleg Nesterov <oleg@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Thomas Gleixner <tglx@...utronix.de>
Subject: [PATCH 16/89] sched/idle: Move polling methods to <linux/sched/idle.h>

Further reduce the size of <linux/sched.h> by moving these APIs:

	tsk_is_polling()
	__current_set_polling()
	current_set_polling_and_test()
	__current_clr_polling()
	current_clr_polling_and_test()
	current_clr_polling()

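For reference (not part of the patch itself): a minimal, hypothetical sketch of
how an idle loop typically pairs these helpers, loosely modelled on
kernel/sched/idle.c. example_idle_loop() is a made-up name and the real code is
structured differently:

	/* Illustration only -- simplified idle loop using the polling helpers. */
	static void example_idle_loop(void)
	{
		__current_set_polling();	/* wakers may skip the resched IPI */

		while (!need_resched()) {
			/*
			 * Stop advertising polling before entering a deeper
			 * idle state; bail out if a resched request raced in.
			 */
			if (current_clr_polling_and_test())
				break;

			arch_cpu_idle();		/* e.g. HLT/MWAIT */

			__current_set_polling();	/* back to polling mode */
		}

		/* Clear the flag and fold any pending resched request. */
		current_clr_polling();
		schedule();
	}
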
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Mike Galbraith <efault@....de>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: linux-kernel@...r.kernel.org
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
arch/x86/include/asm/mwait.h | 1 +
arch/x86/kernel/process.c | 1 +
drivers/cpuidle/driver.c | 1 +
include/linux/sched.h | 76 ----------------------------------------------------------------
include/linux/sched/idle.h | 76 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
kernel/sched/idle.c | 1 +
6 files changed, 80 insertions(+), 76 deletions(-)

diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index f37f2d8a2989..bda3c27f0da0 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -2,6 +2,7 @@
#define _ASM_X86_MWAIT_H
#include <linux/sched.h>
+#include <linux/sched/idle.h>
#include <asm/cpufeature.h>
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index b615a1113f58..e9176466c587 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -7,6 +7,7 @@
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/sched/idle.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pm.h>
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index ab264d393233..e53fb861beb0 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -11,6 +11,7 @@
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/sched.h>
+#include <linux/sched/idle.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/tick.h>
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3842a0d822a9..93bb2b0b22fd 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2888,82 +2888,6 @@ static inline int spin_needbreak(spinlock_t *lock)
#endif
}
-/*
- * Idle thread specific functions to determine the need_resched
- * polling state.
- */
-#ifdef TIF_POLLING_NRFLAG
-static inline int tsk_is_polling(struct task_struct *p)
-{
- return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
-}
-
-static inline void __current_set_polling(void)
-{
- set_thread_flag(TIF_POLLING_NRFLAG);
-}
-
-static inline bool __must_check current_set_polling_and_test(void)
-{
- __current_set_polling();
-
- /*
- * Polling state must be visible before we test NEED_RESCHED,
- * paired by resched_curr()
- */
- smp_mb__after_atomic();
-
- return unlikely(tif_need_resched());
-}
-
-static inline void __current_clr_polling(void)
-{
- clear_thread_flag(TIF_POLLING_NRFLAG);
-}
-
-static inline bool __must_check current_clr_polling_and_test(void)
-{
- __current_clr_polling();
-
- /*
- * Polling state must be visible before we test NEED_RESCHED,
- * paired by resched_curr()
- */
- smp_mb__after_atomic();
-
- return unlikely(tif_need_resched());
-}
-
-#else
-static inline int tsk_is_polling(struct task_struct *p) { return 0; }
-static inline void __current_set_polling(void) { }
-static inline void __current_clr_polling(void) { }
-
-static inline bool __must_check current_set_polling_and_test(void)
-{
- return unlikely(tif_need_resched());
-}
-static inline bool __must_check current_clr_polling_and_test(void)
-{
- return unlikely(tif_need_resched());
-}
-#endif
-
-static inline void current_clr_polling(void)
-{
- __current_clr_polling();
-
- /*
- * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
- * Once the bit is cleared, we'll get IPIs with every new
- * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
- * fold.
- */
- smp_mb(); /* paired with resched_curr() */
-
- preempt_fold_need_resched();
-}
-
static __always_inline bool need_resched(void)
{
return unlikely(tif_need_resched());
diff --git a/include/linux/sched/idle.h b/include/linux/sched/idle.h
index 6bcb32118ab0..0c15c7ed906f 100644
--- a/include/linux/sched/idle.h
+++ b/include/linux/sched/idle.h
@@ -10,4 +10,80 @@ enum cpu_idle_type {
extern void wake_up_if_idle(int cpu);
+/*
+ * Idle thread specific functions to determine the need_resched
+ * polling state.
+ */
+#ifdef TIF_POLLING_NRFLAG
+static inline int tsk_is_polling(struct task_struct *p)
+{
+ return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
+}
+
+static inline void __current_set_polling(void)
+{
+ set_thread_flag(TIF_POLLING_NRFLAG);
+}
+
+static inline bool __must_check current_set_polling_and_test(void)
+{
+ __current_set_polling();
+
+ /*
+ * Polling state must be visible before we test NEED_RESCHED,
+ * paired by resched_curr()
+ */
+ smp_mb__after_atomic();
+
+ return unlikely(tif_need_resched());
+}
+
+static inline void __current_clr_polling(void)
+{
+ clear_thread_flag(TIF_POLLING_NRFLAG);
+}
+
+static inline bool __must_check current_clr_polling_and_test(void)
+{
+ __current_clr_polling();
+
+ /*
+ * Polling state must be visible before we test NEED_RESCHED,
+ * paired by resched_curr()
+ */
+ smp_mb__after_atomic();
+
+ return unlikely(tif_need_resched());
+}
+
+#else
+static inline int tsk_is_polling(struct task_struct *p) { return 0; }
+static inline void __current_set_polling(void) { }
+static inline void __current_clr_polling(void) { }
+
+static inline bool __must_check current_set_polling_and_test(void)
+{
+ return unlikely(tif_need_resched());
+}
+static inline bool __must_check current_clr_polling_and_test(void)
+{
+ return unlikely(tif_need_resched());
+}
+#endif
+
+static inline void current_clr_polling(void)
+{
+ __current_clr_polling();
+
+ /*
+ * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
+ * Once the bit is cleared, we'll get IPIs with every new
+ * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
+ * fold.
+ */
+ smp_mb(); /* paired with resched_curr() */
+
+ preempt_fold_need_resched();
+}
+
#endif /* _LINUX_SCHED_IDLE_H */
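
The "paired by resched_curr()" comments above refer to the waker side of this
handshake. A rough, hypothetical sketch of that side (the real logic lives in
resched_curr() in kernel/sched/core.c, which folds the flag update and the
polling check into a single atomic operation via set_nr_and_not_polling()):

	/* Illustration only -- conceptual waker side, not the kernel's code. */
	static void example_resched_remote(struct task_struct *idle_task, int cpu)
	{
		set_tsk_need_resched(idle_task);	/* request a reschedule */

		/*
		 * Make TIF_NEED_RESCHED visible before sampling the polling
		 * flag; pairs with smp_mb__after_atomic() in
		 * current_set_polling_and_test()/current_clr_polling_and_test().
		 */
		smp_mb();

		if (!tsk_is_polling(idle_task))
			smp_send_reschedule(cpu);	/* not polling: send the IPI */
		/* else: the polling idle loop will notice TIF_NEED_RESCHED itself */
	}

Either the idle CPU observes TIF_NEED_RESCHED after setting/clearing its polling
state, or the waker observes that the CPU is no longer polling and sends the
IPI; the two barriers ensure at least one side sees the other.
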
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 6a4bae0a649d..ac6d5176463d 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -2,6 +2,7 @@
* Generic entry point for the idle threads
*/
#include <linux/sched.h>
+#include <linux/sched/idle.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/cpuhotplug.h>
--
2.7.4