Message-Id: <20210713160746.897411744@linutronix.de>
Date: Tue, 13 Jul 2021 17:11:04 +0200
From: Thomas Gleixner <tglx@...utronix.de>
To: LKML <linux-kernel@...r.kernel.org>
Cc: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...nel.org>,
Juri Lelli <juri.lelli@...hat.com>,
Steven Rostedt <rostedt@...dmis.org>,
Daniel Bristot de Oliveira <bristot@...hat.com>,
Will Deacon <will@...nel.org>,
Waiman Long <longman@...hat.com>,
Boqun Feng <boqun.feng@...il.com>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Davidlohr Bueso <dave@...olabs.net>
Subject: [patch 10/50] locking/rtmutex: Provide rt_mutex_slowlock_locked()
From: Thomas Gleixner <tglx@...utronix.de>
Split the inner workings of rt_mutex_slowlock() out into a separate
function which can be reused by the upcoming RT lock substitutions,
e.g. for rw_semaphores.
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
---
kernel/locking/rtmutex.c | 95 ++++++++++++++++++++++------------------
kernel/locking/rtmutex_api.c | 16 ++++++
kernel/locking/rtmutex_common.h | 3 +
3 files changed, 72 insertions(+), 42 deletions(-)
---
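Note (editorial, not part of the patch): the point of the split is that an RT lock
substitution can take rtmutex::wait_lock itself and then reuse the blocking slowpath
through the new rwsem_rt_mutex_slowlock_locked() interface added below. A minimal
sketch of how a later rw_semaphore substitution might do that follows; the function
name rwbase_rt_write_lock() and the direct use of rt_mutex_cmpxchg_acquire() outside
of rtmutex.c are illustrative assumptions only, not something this series provides.

static int __sched rwbase_rt_write_lock(struct rt_mutex *rtm, unsigned int state)
{
	int ret;

	/* Fast path: try the uncontended cmpxchg acquisition first */
	if (likely(rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
		return 0;

	/*
	 * Slow path: take wait_lock and block via the helper which
	 * expects wait_lock to be held by the caller.
	 */
	raw_spin_lock_irq(&rtm->wait_lock);
	ret = rwsem_rt_mutex_slowlock_locked(rtm, state);
	raw_spin_unlock_irq(&rtm->wait_lock);

	return ret;
}

Keeping the wait_lock acquisition in the caller is what lets the rwsem/rwlock
substitutions combine their own state handling with the rtmutex blocking logic
without duplicating the wait-wake-try-to-take loop.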
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1106,7 +1106,7 @@ static void __sched remove_waiter(struct
}
/**
- * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
+ * rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop
* @lock: the rt_mutex to take
* @state: the state the task should block in (TASK_INTERRUPTIBLE
* or TASK_UNINTERRUPTIBLE)
@@ -1115,9 +1115,10 @@ static void __sched remove_waiter(struct
*
* Must be called with lock->wait_lock held and interrupts disabled
*/
-static int __sched __rt_mutex_slowlock(struct rt_mutex *lock, unsigned int state,
- struct hrtimer_sleeper *timeout,
- struct rt_mutex_waiter *waiter)
+static int __sched rt_mutex_slowlock_block(struct rt_mutex *lock,
+ unsigned int state,
+ struct hrtimer_sleeper *timeout,
+ struct rt_mutex_waiter *waiter)
{
int ret = 0;
@@ -1167,51 +1168,37 @@ static void __sched rt_mutex_handle_dead
}
}
-/*
- * Slow path lock function:
+/**
+ * __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held
+ * @lock: The rtmutex to block lock
+ * @state: The task state for sleeping
+ * @chwalk: Indicator whether full or partial chainwalk is requested
+ * @waiter: Initialized waiter for blocking
*/
-static int __sched rt_mutex_slowlock(struct rt_mutex *lock, unsigned int state,
- struct hrtimer_sleeper *timeout,
- enum rtmutex_chainwalk chwalk)
+static int __sched __rt_mutex_slowlock(struct rt_mutex *lock,
+ unsigned int state,
+ enum rtmutex_chainwalk chwalk,
+ struct rt_mutex_waiter *waiter)
{
- struct rt_mutex_waiter waiter;
- unsigned long flags;
- int ret = 0;
-
- rt_mutex_init_waiter(&waiter);
+ int ret;
- /*
- * Technically we could use raw_spin_[un]lock_irq() here, but this can
- * be called in early boot if the cmpxchg() fast path is disabled
- * (debug, no architecture support). In this case we will acquire the
- * rtmutex with lock->wait_lock held. But we cannot unconditionally
- * enable interrupts in that early boot case. So we need to use the
- * irqsave/restore variants.
- */
- raw_spin_lock_irqsave(&lock->wait_lock, flags);
+ lockdep_assert_held(&lock->wait_lock);
/* Try to acquire the lock again: */
- if (try_to_take_rt_mutex(lock, current, NULL)) {
- raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+ if (try_to_take_rt_mutex(lock, current, NULL))
return 0;
- }
set_current_state(state);
- /* Setup the timer, when timeout != NULL */
- if (unlikely(timeout))
- hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
-
- ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
+ ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk);
if (likely(!ret))
- /* sleep on the mutex */
- ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
+ ret = rt_mutex_slowlock_block(lock, state, NULL, waiter);
if (unlikely(ret)) {
__set_current_state(TASK_RUNNING);
- remove_waiter(lock, &waiter);
- rt_mutex_handle_deadlock(ret, chwalk, &waiter);
+ remove_waiter(lock, waiter);
+ rt_mutex_handle_deadlock(ret, chwalk, waiter);
}
/*
@@ -1219,14 +1206,40 @@ static int __sched rt_mutex_slowlock(str
* unconditionally. We might have to fix that up.
*/
fixup_rt_mutex_waiters(lock);
+ return ret;
+}
- raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+static inline int __rt_mutex_slowlock_locked(struct rt_mutex *lock,
+ unsigned int state)
+{
+ struct rt_mutex_waiter waiter;
- /* Remove pending timer: */
- if (unlikely(timeout))
- hrtimer_cancel(&timeout->timer);
+ rt_mutex_init_waiter(&waiter);
+
+ return __rt_mutex_slowlock(lock, state, RT_MUTEX_MIN_CHAINWALK, &waiter);
+}
+
+/*
+ * rt_mutex_slowlock - Locking slowpath invoked when fast path fails
+ * @lock: The rtmutex to block lock
+ * @state: The task state for sleeping
+ */
+static int __sched rt_mutex_slowlock(struct rt_mutex *lock, unsigned int state)
+{
+ unsigned long flags;
+ int ret;
- debug_rt_mutex_free_waiter(&waiter);
+ /*
+ * Technically we could use raw_spin_[un]lock_irq() here, but this can
+ * be called in early boot if the cmpxchg() fast path is disabled
+ * (debug, no architecture support). In this case we will acquire the
+ * rtmutex with lock->wait_lock held. But we cannot unconditionally
+ * enable interrupts in that early boot case. So we need to use the
+ * irqsave/restore variants.
+ */
+ raw_spin_lock_irqsave(&lock->wait_lock, flags);
+ ret = __rt_mutex_slowlock_locked(lock, state);
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
return ret;
}
@@ -1237,7 +1250,7 @@ static __always_inline int __rt_mutex_lo
if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
return 0;
- return rt_mutex_slowlock(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
+ return rt_mutex_slowlock(lock, state);
}
static int __sched __rt_mutex_slowtrylock(struct rt_mutex *lock)
--- a/kernel/locking/rtmutex_api.c
+++ b/kernel/locking/rtmutex_api.c
@@ -337,7 +337,7 @@ int __sched rt_mutex_wait_proxy_lock(str
raw_spin_lock_irq(&lock->wait_lock);
/* sleep on the mutex */
set_current_state(TASK_INTERRUPTIBLE);
- ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
+ ret = rt_mutex_slowlock_block(lock, TASK_INTERRUPTIBLE, to, waiter);
/*
* try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
* have to fix that up.
@@ -451,3 +451,17 @@ void rt_mutex_debug_task_free(struct tas
DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
}
#endif
+
+/* Interfaces for PREEMPT_RT lock substitutions */
+#ifdef CONFIG_PREEMPT_RT
+/**
+ * rwsem_rt_mutex_slowlock_locked - Lock slowpath invoked with @lock::wait_lock held
+ * @lock: The rtmutex to acquire
+ * @state: The task state for blocking
+ */
+int __sched rwsem_rt_mutex_slowlock_locked(struct rt_mutex *lock,
+ unsigned int state)
+{
+ return __rt_mutex_slowlock_locked(lock, state);
+}
+#endif
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -64,6 +64,9 @@ extern bool __rt_mutex_futex_unlock(stru
extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
+/* Special interfaces for RT lock substitutions */
+int rwsem_rt_mutex_slowlock_locked(struct rt_mutex *lock, unsigned int state);
+
/*
* Must be guarded because this header is included from rcu/tree_plugin.h
* unconditionally.