Message-Id: <20240528003521.979836-7-ankur.a.arora@oracle.com>
Date: Mon, 27 May 2024 17:34:52 -0700
From: Ankur Arora <ankur.a.arora@...cle.com>
To: linux-kernel@...r.kernel.org
Cc: tglx@...utronix.de, peterz@...radead.org, torvalds@...ux-foundation.org,
        paulmck@...nel.org, rostedt@...dmis.org, mark.rutland@....com,
        juri.lelli@...hat.com, joel@...lfernandes.org, raghavendra.kt@....com,
        sshegde@...ux.ibm.com, boris.ostrovsky@...cle.com,
        konrad.wilk@...cle.com, Ankur Arora <ankur.a.arora@...cle.com>,
        Arnd Bergmann <arnd@...db.de>, Ingo Molnar <mingo@...hat.com>,
        Vincent Guittot <vincent.guittot@...aro.org>
Subject: [PATCH v2 06/35] thread_info: define __tif_need_resched(resched_t)

Define __tif_need_resched(), which takes a resched_t parameter
selecting the immediacy of the need-resched check.

Update need_resched() and should_resched() so they both check for
__tif_need_resched(RESCHED_NOW), which keeps the current semantics,
and add need_resched_lazy() and tif_need_resched_lazy() to check for
lazily required rescheduling.

Non-scheduling code -- which only cares about immediately required
preemption -- can continue unchanged, since the commonly used
interfaces (need_resched(), should_resched(), tif_need_resched())
stay the same.

This also allows lazy preemption to just be a scheduler detail.
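For readers outside the series, the following minimal userspace sketch
models the intended semantics. resched_t, tif_resched(), and
TIF_NEED_RESCHED_LAZY come from earlier patches in this series; the bit
numbers and the config_preempt_auto toggle below are illustrative
stand-ins, not the kernel's definitions:

	#include <stdbool.h>
	#include <stdio.h>

	typedef enum { RESCHED_NOW, RESCHED_LAZY } resched_t;

	/* Bit numbers are illustrative only. */
	enum { TIF_NEED_RESCHED = 1, TIF_NEED_RESCHED_LAZY = 2 };

	static unsigned long thread_flags;	/* stand-in for current_thread_info()->flags */
	static const bool config_preempt_auto = true;	/* stand-in for IS_ENABLED(CONFIG_PREEMPT_AUTO) */

	/* Map a resched_t to its thread-flag bit, as tif_resched() does. */
	static int tif_resched(resched_t rs)
	{
		return rs == RESCHED_NOW ? TIF_NEED_RESCHED : TIF_NEED_RESCHED_LAZY;
	}

	static bool __tif_need_resched(resched_t rs)
	{
		/* Without PREEMPT_AUTO, only the RESCHED_NOW bit exists. */
		if (config_preempt_auto || rs == RESCHED_NOW)
			return thread_flags & (1UL << tif_resched(rs));
		return false;
	}

	static bool need_resched(void)      { return __tif_need_resched(RESCHED_NOW); }
	static bool need_resched_lazy(void) { return __tif_need_resched(RESCHED_LAZY); }

	int main(void)
	{
		thread_flags |= 1UL << TIF_NEED_RESCHED_LAZY;	/* lazy reschedule requested */
		printf("need_resched():      %d\n", need_resched());		/* 0: no immediate preemption */
		printf("need_resched_lazy(): %d\n", need_resched_lazy());	/* 1 */
		return 0;
	}

With PREEMPT_AUTO disabled, __tif_need_resched(RESCHED_LAZY) constant-folds
to false, so existing callers of need_resched() and tif_need_resched() see
no change in behavior.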

Cc: Arnd Bergmann <arnd@...db.de>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Juri Lelli <juri.lelli@...hat.com>
Cc: Vincent Guittot <vincent.guittot@...aro.org>
Cc: Rafael J. Wysocki" <rafael@...nel.org>
Cc: Steven Rostedt <rostedt@...dmis.org>
Originally-by: Thomas Gleixner <tglx@...utronix.de>
Link: https://lore.kernel.org/lkml/87jzshhexi.ffs@tglx/
Signed-off-by: Ankur Arora <ankur.a.arora@...cle.com>
---
 include/linux/preempt.h     |  2 +-
 include/linux/sched.h       |  7 ++++++-
 include/linux/thread_info.h | 34 ++++++++++++++++++++++++++++------
 kernel/trace/trace.c        |  2 +-
 4 files changed, 36 insertions(+), 9 deletions(-)

diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index ce76f1a45722..d453f5e34390 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -312,7 +312,7 @@ do { \
 } while (0)
 #define preempt_fold_need_resched() \
 do { \
-	if (tif_need_resched()) \
+	if (__tif_need_resched(RESCHED_NOW)) \
 		set_preempt_need_resched(); \
 } while (0)
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4808e5dd4f69..37a51115b691 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2062,7 +2062,12 @@ extern int __cond_resched_rwlock_write(rwlock_t *lock);
 
 static __always_inline bool need_resched(void)
 {
-	return unlikely(tif_need_resched());
+	return unlikely(__tif_need_resched(RESCHED_NOW));
+}
+
+static __always_inline bool need_resched_lazy(void)
+{
+	return unlikely(__tif_need_resched(RESCHED_LAZY));
 }
 
 /*
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 65e5beedc915..e246b01553a5 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -216,22 +216,44 @@ static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti
 
 #ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
 
-static __always_inline bool tif_need_resched(void)
+static __always_inline bool __tif_need_resched_bitop(int nr_flag)
 {
-	return arch_test_bit(TIF_NEED_RESCHED,
-			     (unsigned long *)(&current_thread_info()->flags));
+	return arch_test_bit(nr_flag,
+		     (unsigned long *)(&current_thread_info()->flags));
 }
 
 #else
 
-static __always_inline bool tif_need_resched(void)
+static __always_inline bool __tif_need_resched_bitop(int nr_flag)
 {
-	return test_bit(TIF_NEED_RESCHED,
-			(unsigned long *)(&current_thread_info()->flags));
+	return test_bit(nr_flag,
+		(unsigned long *)(&current_thread_info()->flags));
 }
 
 #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
 
+static __always_inline bool __tif_need_resched(resched_t rs)
+{
+	/*
+	 * With !PREEMPT_AUTO, this check is only meaningful if we
+	 * are checking if tif_resched(RESCHED_NOW) is set.
+	 */
+	if (IS_ENABLED(CONFIG_PREEMPT_AUTO) || rs == RESCHED_NOW)
+		return __tif_need_resched_bitop(tif_resched(rs));
+	else
+		return false;
+}
+
+static __always_inline bool tif_need_resched(void)
+{
+	return __tif_need_resched(RESCHED_NOW);
+}
+
+static __always_inline bool tif_need_resched_lazy(void)
+{
+	return __tif_need_resched(RESCHED_LAZY);
+}
+
 #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
 static inline int arch_within_stack_frames(const void * const stack,
 					   const void * const stackend,
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 233d1af39fff..ed229527be05 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2511,7 +2511,7 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
 	if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
 		trace_flags |= TRACE_FLAG_BH_OFF;
 
-	if (tif_need_resched())
+	if (__tif_need_resched(RESCHED_NOW))
 		trace_flags |= TRACE_FLAG_NEED_RESCHED;
 	if (test_preempt_need_resched())
 		trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
-- 
2.31.1

