Message-Id: <20210713160749.648533960@linutronix.de>
Date:   Tue, 13 Jul 2021 17:11:32 +0200
From:   Thomas Gleixner <tglx@...utronix.de>
To:     LKML <linux-kernel@...r.kernel.org>
Cc:     Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...nel.org>,
        Juri Lelli <juri.lelli@...hat.com>,
        Steven Rostedt <rostedt@...dmis.org>,
        Daniel Bristot de Oliveira <bristot@...hat.com>,
        Will Deacon <will@...nel.org>,
        Waiman Long <longman@...hat.com>,
        Boqun Feng <boqun.feng@...il.com>,
        Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
        Davidlohr Bueso <dave@...olabs.net>
Subject: [patch 38/50] locking/mutex: Exclude non-ww_mutex API for RT

From: Thomas Gleixner <tglx@...utronix.de>

In order to build ww_mutex standalone on RT and to replace mutex with an
RT-specific rtmutex-based variant, guard the non-ww_mutex API so it is only
built when CONFIG_PREEMPT_RT is disabled.

No functional change.
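
The guarding pattern itself is straightforward: the plain mutex entry
points are compiled out when CONFIG_PREEMPT_RT is set, while the shared
inline helpers stay available to the ww_mutex side, which is still built
on RT. A minimal standalone sketch of that pattern (demo-only names, not
kernel code; build with or without -DCONFIG_PREEMPT_RT):

/*
 * demo.c - illustrates compiling one API variant out under a config
 * switch while a common inline helper remains available to both sides.
 *
 *   gcc demo.c && ./a.out                      # plain API built
 *   gcc -DCONFIG_PREEMPT_RT demo.c && ./a.out  # plain API excluded
 */
#include <stdio.h>

static inline void __demo_unlock(void)	/* shared helper, always built */
{
	printf("common unlock path\n");
}

#ifndef CONFIG_PREEMPT_RT
void demo_mutex_unlock(void)		/* plain API: !RT only */
{
	__demo_unlock();
}
#endif /* !CONFIG_PREEMPT_RT */

void demo_ww_mutex_unlock(void)		/* ww API: built on RT too */
{
	/* Call the helper directly, not the possibly-absent plain API. */
	__demo_unlock();
}

int main(void)
{
#ifndef CONFIG_PREEMPT_RT
	demo_mutex_unlock();
#endif
	demo_ww_mutex_unlock();
	return 0;
}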

Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
---
 kernel/locking/mutex.c |   33 +++++++++++++++++++++++----------
 1 file changed, 23 insertions(+), 10 deletions(-)
---
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -242,7 +242,7 @@ static void __mutex_handoff(_mutex_t *lo
 	}
 }
 
-#ifndef CONFIG_DEBUG_LOCK_ALLOC
+#if !defined(CONFIG_DEBUG_LOCK_ALLOC) && !defined(CONFIG_PREEMPT_RT)
 /*
  * We split the mutex lock/unlock logic into separate fastpath and
  * slowpath functions, to reduce the register pressure on the fastpath.
@@ -280,7 +280,7 @@ void __sched mutex_lock(struct mutex *lo
 		__mutex_lock_slowpath(lock);
 }
 EXPORT_SYMBOL(mutex_lock);
-#endif
+#endif /* !CONFIG_DEBUG_LOCK_ALLOC && !CONFIG_PREEMPT_RT */
 
 /*
  * Wait-Die:
@@ -705,17 +705,27 @@ mutex_optimistic_spin(_mutex_t *lock, st
 
 	return false;
 }
-#else
+#else /* !CONFIG_MUTEX_SPIN_ON_OWNER */
 static __always_inline bool
 mutex_optimistic_spin(_mutex_t *lock, struct ww_acquire_ctx *ww_ctx,
 		      struct mutex_waiter *waiter)
 {
 	return false;
 }
-#endif
+#endif /* CONFIG_MUTEX_SPIN_ON_OWNER */
 
 static noinline void __sched __mutex_unlock_slowpath(_mutex_t *lock, unsigned long ip);
 
+static __always_inline void __mutex_unlock(_mutex_t *lock)
+{
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
+	if (__mutex_unlock_fast(lock))
+		return;
+#endif
+	__mutex_unlock_slowpath(lock, _RET_IP_);
+}
+
+#ifndef CONFIG_PREEMPT_RT
 /**
  * mutex_unlock - release the mutex
  * @lock: the mutex to be released
@@ -729,13 +739,10 @@ static noinline void __sched __mutex_unl
  */
 void __sched mutex_unlock(struct mutex *lock)
 {
-#ifndef CONFIG_DEBUG_LOCK_ALLOC
-	if (__mutex_unlock_fast(lock))
-		return;
-#endif
-	__mutex_unlock_slowpath(lock, _RET_IP_);
+	__mutex_unlock(lock);
 }
 EXPORT_SYMBOL(mutex_unlock);
+#endif /* !CONFIG_PREEMPT_RT */
 
 /**
  * ww_mutex_unlock - release the w/w mutex
@@ -763,7 +770,7 @@ void __sched ww_mutex_unlock(struct ww_m
 		lock->ctx = NULL;
 	}
 
-	mutex_unlock(&lock->base);
+	__mutex_unlock(&lock->base);
 }
 EXPORT_SYMBOL(ww_mutex_unlock);
 
@@ -1093,12 +1100,14 @@ static __always_inline int __sched
 	return ret;
 }
 
+#ifndef CONFIG_PREEMPT_RT
 static int __sched
 __mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
 	     struct lockdep_map *nest_lock, unsigned long ip)
 {
 	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
 }
+#endif /* !CONFIG_PREEMPT_RT */
 
 static int __sched
 __ww_mutex_lock(_mutex_t *lock, unsigned int state, unsigned int subclass,
@@ -1109,6 +1118,7 @@ static int __sched
 }
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifndef CONFIG_PREEMPT_RT
 void __sched
 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
@@ -1151,6 +1161,7 @@ mutex_lock_io_nested(struct mutex *lock,
 	io_schedule_finish(token);
 }
 EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
+# endif /* !CONFIG_PREEMPT_RT */
 
 static inline int
 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
@@ -1278,6 +1289,7 @@ static noinline void __sched __mutex_unl
 }
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
+#ifndef CONFIG_PREEMPT_RT
 /*
  * Here come the less common (and hence less performance-critical) APIs:
  * mutex_lock_interruptible() and mutex_trylock().
@@ -1372,6 +1384,7 @@ static noinline int __sched
 {
 	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
 }
+#endif /* !CONFIG_PREEMPT_RT */
 
 static noinline int __sched
 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
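
For reviewers skimming the hunks: the new always-built __mutex_unlock()
helper is what keeps ww_mutex_unlock() working on RT, where the exported
mutex_unlock() is no longer compiled in. A reviewer's sketch of the
resulting call structure (not part of the patch):

/*
 *   mutex_unlock()           built for !RT only
 *        \
 *         __mutex_unlock() - fastpath (unless CONFIG_DEBUG_LOCK_ALLOC),
 *        /                   else __mutex_unlock_slowpath()
 *   ww_mutex_unlock()        built on RT as well
 */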
