Message-ID: <20231216032651.3553101-6-kent.overstreet@linux.dev>
Date: Fri, 15 Dec 2023 22:26:15 -0500
From: Kent Overstreet <kent.overstreet@...ux.dev>
To: linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
linux-fsdevel@...r.kernel.org
Cc: Kent Overstreet <kent.overstreet@...ux.dev>,
tglx@...utronix.de,
x86@...nel.org,
tj@...nel.org,
peterz@...radead.org,
mathieu.desnoyers@...icios.com,
paulmck@...nel.org,
keescook@...omium.org,
dave.hansen@...ux.intel.com,
mingo@...hat.com,
will@...nel.org,
longman@...hat.com,
boqun.feng@...il.com,
brauner@...nel.org
Subject: [PATCH 16/50] sched.h: Move (spin|rwlock)_needbreak() to spinlock.h

This lets us kill sched.h's dependency on spinlock.h.
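
For anyone unfamiliar with these helpers, the usual lock-break pattern
looks roughly like the sketch below. This is illustration only, not part
of the patch; struct foo, walk_table() and do_one_item() are made-up
names, and cond_resched_lock() open-codes essentially the same check.

	struct foo {				/* hypothetical */
		spinlock_t	lock;
		unsigned long	cursor, nr;
	};

	static void walk_table(struct foo *f)	/* hypothetical */
	{
		spin_lock(&f->lock);
		while (f->cursor < f->nr) {
			do_one_item(f, f->cursor++);	/* hypothetical helper */

			/*
			 * Another task is spinning on f->lock (or we need to
			 * reschedule): drop the lock briefly to cap latency.
			 */
			if (spin_needbreak(&f->lock) || need_resched()) {
				spin_unlock(&f->lock);
				cond_resched();
				spin_lock(&f->lock);
			}
		}
		spin_unlock(&f->lock);
	}
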
Signed-off-by: Kent Overstreet <kent.overstreet@...ux.dev>
---
 include/linux/sched.h    | 31 -------------------------------
 include/linux/spinlock.h | 31 +++++++++++++++++++++++++++++++
 2 files changed, 31 insertions(+), 31 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5a5b7b122682..7501a3451a20 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2227,37 +2227,6 @@ static inline bool preempt_model_preemptible(void)
return preempt_model_full() || preempt_model_rt();
}
-/*
- * Does a critical section need to be broken due to another
- * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
- * but a general need for low latency)
- */
-static inline int spin_needbreak(spinlock_t *lock)
-{
-#ifdef CONFIG_PREEMPTION
- return spin_is_contended(lock);
-#else
- return 0;
-#endif
-}
-
-/*
- * Check if a rwlock is contended.
- * Returns non-zero if there is another task waiting on the rwlock.
- * Returns zero if the lock is not contended or the system / underlying
- * rwlock implementation does not support contention detection.
- * Technically does not depend on CONFIG_PREEMPTION, but a general need
- * for low latency.
- */
-static inline int rwlock_needbreak(rwlock_t *lock)
-{
-#ifdef CONFIG_PREEMPTION
- return rwlock_is_contended(lock);
-#else
- return 0;
-#endif
-}
-
static __always_inline bool need_resched(void)
{
return unlikely(tif_need_resched());
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 31d3d747a9db..0c71f06454d9 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -449,6 +449,37 @@ static __always_inline int spin_is_contended(spinlock_t *lock)
return raw_spin_is_contended(&lock->rlock);
}
+/*
+ * Does a critical section need to be broken due to another
+ * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
+ * but a general need for low latency)
+ */
+static inline int spin_needbreak(spinlock_t *lock)
+{
+#ifdef CONFIG_PREEMPTION
+ return spin_is_contended(lock);
+#else
+ return 0;
+#endif
+}
+
+/*
+ * Check if a rwlock is contended.
+ * Returns non-zero if there is another task waiting on the rwlock.
+ * Returns zero if the lock is not contended or the system / underlying
+ * rwlock implementation does not support contention detection.
+ * Technically does not depend on CONFIG_PREEMPTION, but a general need
+ * for low latency.
+ */
+static inline int rwlock_needbreak(rwlock_t *lock)
+{
+#ifdef CONFIG_PREEMPTION
+ return rwlock_is_contended(lock);
+#else
+ return 0;
+#endif
+}
+
#define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
#else /* !CONFIG_PREEMPT_RT */
--
2.43.0