Message-ID: <20210815211304.678720245@linutronix.de>
Date: Sun, 15 Aug 2021 23:28:50 +0200 (CEST)
From: Thomas Gleixner <tglx@...utronix.de>
To: LKML <linux-kernel@...r.kernel.org>
Cc: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...nel.org>,
Juri Lelli <juri.lelli@...hat.com>,
Steven Rostedt <rostedt@...dmis.org>,
Daniel Bristot de Oliveira <bristot@...hat.com>,
Will Deacon <will@...nel.org>,
Waiman Long <longman@...hat.com>,
Boqun Feng <boqun.feng@...il.com>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Davidlohr Bueso <dave@...olabs.net>,
Mike Galbraith <efault@....de>
Subject: [patch V5 48/72] locking/ww_mutex: Abstract mutex types

From: Peter Zijlstra <peterz@...radead.org>

Some ww_mutex helper functions use pointers for the underlying mutex and
mutex_waiter. The upcoming rtmutex-based implementation needs to share
these functions. Add and use defines for the types and replace the direct
types in the affected functions.
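
A minimal, self-contained sketch of the idea (illustration only, not part
of the patch): the shared helpers are written against the abstract names
MUTEX and MUTEX_WAITER, so a different flavour - such as the planned
rtmutex-based one - could presumably be compiled against its own types by
changing only the defines. The toy_mutex/toy_mutex_waiter names below are
made up for this example, which builds as plain userspace C:

#include <stdio.h>

/*
 * Stand-in types for the example; in the kernel these would be
 * struct mutex / struct mutex_waiter (or the rtmutex equivalents).
 */
struct toy_mutex_waiter { int id; };
struct toy_mutex { struct toy_mutex_waiter *first_waiter; };

/*
 * The abstraction from the patch: point the abstract names at the
 * concrete types once, before the shared helpers are compiled.
 */
#define MUTEX		toy_mutex
#define MUTEX_WAITER	toy_mutex_waiter

/* A shared helper written only in terms of the abstract names. */
static inline struct MUTEX_WAITER *
toy_waiter_first(struct MUTEX *lock)
{
	return lock->first_waiter;
}

int main(void)
{
	struct toy_mutex_waiter w = { .id = 42 };
	struct toy_mutex lock = { .first_waiter = &w };

	printf("first waiter id: %d\n", toy_waiter_first(&lock)->id);
	return 0;
}

Because the names are plain preprocessor defines, the substitution is
resolved entirely at compile time; the trade-off is that only one set of
defines can be active per translation unit that pulls in the shared
helpers.
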
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
---
kernel/locking/ww_mutex.h | 23 +++++++++++++----------
1 file changed, 13 insertions(+), 10 deletions(-)
---
--- a/kernel/locking/ww_mutex.h
+++ b/kernel/locking/ww_mutex.h
@@ -1,5 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 
+#define MUTEX		mutex
+#define MUTEX_WAITER	mutex_waiter
+
 static inline struct mutex_waiter *
 __ww_waiter_first(struct mutex *lock)
 {
@@ -143,7 +146,7 @@ static inline bool
  * __ww_mutex_check_kill() wake any but the earliest context.
  */
 static bool
-__ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
+__ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
 	       struct ww_acquire_ctx *ww_ctx)
 {
 	if (!ww_ctx->is_wait_die)
@@ -165,7 +168,7 @@ static bool
  * the lock holders. Even if multiple waiters may wound the lock holder,
  * it's sufficient that only one does.
  */
-static bool __ww_mutex_wound(struct mutex *lock,
+static bool __ww_mutex_wound(struct MUTEX *lock,
 			     struct ww_acquire_ctx *ww_ctx,
 			     struct ww_acquire_ctx *hold_ctx)
 {
@@ -220,9 +223,9 @@ static bool __ww_mutex_wound(struct mute
  * The current task must not be on the wait list.
  */
 static void
-__ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
+__ww_mutex_check_waiters(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
 {
-	struct mutex_waiter *cur;
+	struct MUTEX_WAITER *cur;
 
 	lockdep_assert_held(&lock->wait_lock);
 
@@ -278,7 +281,7 @@ ww_mutex_set_context_fastpath(struct ww_
 }
 
 static __always_inline int
-__ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
+__ww_mutex_kill(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
 {
 	if (ww_ctx->acquired > 0) {
 #ifdef CONFIG_DEBUG_MUTEXES
@@ -306,12 +309,12 @@ static __always_inline int
  * look at waiters before us in the wait-list.
  */
 static inline int
-__ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
+__ww_mutex_check_kill(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
 		      struct ww_acquire_ctx *ctx)
 {
 	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
 	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
-	struct mutex_waiter *cur;
+	struct MUTEX_WAITER *cur;
 
 	if (ctx->acquired == 0)
 		return 0;
@@ -354,11 +357,11 @@ static inline int
  * Wound-Wait ensure we wound the owning context when it is younger.
  */
 static inline int
-__ww_mutex_add_waiter(struct mutex_waiter *waiter,
-		      struct mutex *lock,
+__ww_mutex_add_waiter(struct MUTEX_WAITER *waiter,
+		      struct MUTEX *lock,
 		      struct ww_acquire_ctx *ww_ctx)
 {
-	struct mutex_waiter *cur, *pos = NULL;
+	struct MUTEX_WAITER *cur, *pos = NULL;
 	bool is_wait_die;
 
 	if (!ww_ctx) {