Message-ID: <20210811121417.124506093@linutronix.de>
Date: Wed, 11 Aug 2021 14:23:32 +0200 (CEST)
From: Thomas Gleixner <tglx@...utronix.de>
To: LKML <linux-kernel@...r.kernel.org>
Cc: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...nel.org>,
Juri Lelli <juri.lelli@...hat.com>,
Steven Rostedt <rostedt@...dmis.org>,
Daniel Bristot de Oliveira <bristot@...hat.com>,
Will Deacon <will@...nel.org>,
Waiman Long <longman@...hat.com>,
Boqun Feng <boqun.feng@...il.com>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Davidlohr Bueso <dave@...olabs.net>,
Mike Galbraith <efault@....de>
Subject: [patch V4 50/68] locking/ww_mutex: Add rt_mutex based lock type and accessors
From: Peter Zijlstra <peterz@...radead.org>

Provide the defines for RT-mutex based ww_mutexes and fix up the debug
logic so that it is enabled either by DEBUG_MUTEXES or, on RT kernels,
by DEBUG_RT_MUTEXES.

Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
---
include/linux/ww_mutex.h | 33 ++++++++++++++++++++++++---------
kernel/locking/ww_mutex.h | 6 +++---
2 files changed, 27 insertions(+), 12 deletions(-)
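
Not part of the patch, purely an illustrative sketch for review: caller-side
usage stays the same regardless of which base lock WW_MUTEX_BASE resolves to,
since everything funnels through the ww_mutex_base_*() wrappers. The names
demo_ww_class and demo_lock_pair() below are made up for the example.

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);

/*
 * Acquire two ww_mutexes (both initialized against demo_ww_class) with
 * the usual wait/wound backoff. Nothing here depends on whether the
 * base lock is a mutex or, on PREEMPT_RT, an rt_mutex.
 */
static void demo_lock_pair(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &demo_ww_class);

	/* Nothing is held yet, so this cannot return -EDEADLK */
	ret = ww_mutex_lock(a, &ctx);
	WARN_ON_ONCE(ret);

	while ((ret = ww_mutex_lock(b, &ctx)) == -EDEADLK) {
		/* Back off, sleep for the contended lock, then retry */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);
		swap(a, b);
	}
	ww_acquire_done(&ctx);

	/* ... critical section with both locks held ... */

	ww_mutex_unlock(a);
	ww_mutex_unlock(b);
	ww_acquire_fini(&ctx);
}
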
---
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -18,11 +18,24 @@
#define __LINUX_WW_MUTEX_H
#include <linux/mutex.h>
+#include <linux/rtmutex.h>
+#if defined(CONFIG_DEBUG_MUTEXES) || \
+ (defined(CONFIG_PREEMPT_RT) && defined(CONFIG_DEBUG_RT_MUTEXES))
+#define DEBUG_WW_MUTEXES
+#endif
+
+#ifndef CONFIG_PREEMPT_RT
#define WW_MUTEX_BASE mutex
#define ww_mutex_base_init(l,n,k) __mutex_init(l,n,k)
#define ww_mutex_base_trylock(l) mutex_trylock(l)
#define ww_mutex_base_is_locked(b) mutex_is_locked((b))
+#else
+#define WW_MUTEX_BASE rt_mutex
+#define ww_mutex_base_init(l,n,k) __rt_mutex_init(l,n,k)
+#define ww_mutex_base_trylock(l) rt_mutex_trylock(l)
+#define ww_mutex_base_is_locked(b) rt_mutex_base_is_locked(&(b)->rtmutex)
+#endif
struct ww_class {
atomic_long_t stamp;
@@ -36,7 +49,7 @@ struct ww_class {
struct ww_mutex {
struct WW_MUTEX_BASE base;
struct ww_acquire_ctx *ctx;
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
struct ww_class *ww_class;
#endif
};
@@ -47,10 +60,10 @@ struct ww_acquire_ctx {
unsigned int acquired;
unsigned short wounded;
unsigned short is_wait_die;
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
unsigned int done_acquire;
struct ww_class *ww_class;
- struct ww_mutex *contending_lock;
+ void *contending_lock;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
@@ -89,7 +102,7 @@ static inline void ww_mutex_init(struct
{
ww_mutex_base_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
lock->ctx = NULL;
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
lock->ww_class = ww_class;
#endif
}
@@ -126,7 +139,7 @@ static inline void ww_acquire_init(struc
ctx->acquired = 0;
ctx->wounded = false;
ctx->is_wait_die = ww_class->is_wait_die;
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
ctx->ww_class = ww_class;
ctx->done_acquire = 0;
ctx->contending_lock = NULL;
@@ -156,7 +169,7 @@ static inline void ww_acquire_init(struc
*/
static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
{
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
lockdep_assert_held(ctx);
DEBUG_LOCKS_WARN_ON(ctx->done_acquire);
@@ -176,7 +189,7 @@ static inline void ww_acquire_fini(struc
#ifdef CONFIG_DEBUG_LOCK_ALLOC
mutex_release(&ctx->dep_map, _THIS_IP_);
#endif
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
DEBUG_LOCKS_WARN_ON(ctx->acquired);
if (!IS_ENABLED(CONFIG_PROVE_LOCKING))
/*
@@ -282,7 +295,7 @@ static inline void
ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
int ret;
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
#endif
ret = ww_mutex_lock(lock, ctx);
@@ -318,7 +331,7 @@ static inline int __must_check
ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx)
{
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
#endif
return ww_mutex_lock_interruptible(lock, ctx);
@@ -348,7 +361,9 @@ static inline int __must_check ww_mutex_
*/
static inline void ww_mutex_destroy(struct ww_mutex *lock)
{
+#ifndef CONFIG_PREEMPT_RT
mutex_destroy(&lock->base);
+#endif
}
/**
--- a/kernel/locking/ww_mutex.h
+++ b/kernel/locking/ww_mutex.h
@@ -180,7 +180,7 @@ static inline void lockdep_assert_wait_l
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
/*
* If this WARN_ON triggers, you used ww_mutex_lock to acquire,
* but released with a normal mutex_unlock in this call.
@@ -413,7 +413,7 @@ static __always_inline int
__ww_mutex_kill(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
{
if (ww_ctx->acquired > 0) {
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
struct ww_mutex *ww;
ww = container_of(lock, struct ww_mutex, base);
@@ -559,7 +559,7 @@ static inline int
static inline void __ww_mutex_unlock(struct ww_mutex *lock)
{
if (lock->ctx) {
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
if (lock->ctx->acquired > 0)