Message-ID: <20230815111430.421408298@infradead.org>
Date: Tue, 15 Aug 2023 13:01:26 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: bigeasy@...utronix.de, tglx@...utronix.de
Cc: linux-kernel@...r.kernel.org, peterz@...radead.org,
bsegall@...gle.com, boqun.feng@...il.com, swood@...hat.com,
bristot@...hat.com, dietmar.eggemann@....com, mingo@...hat.com,
jstultz@...gle.com, juri.lelli@...hat.com, mgorman@...e.de,
rostedt@...dmis.org, vschneid@...hat.com,
vincent.guittot@...aro.org, longman@...hat.com, will@...nel.org
Subject: [PATCH 5/6] locking/rtmutex: Use rt_mutex specific scheduler helpers

From: Sebastian Andrzej Siewior <bigeasy@...utronix.de>

Have rt_mutex use the rt_mutex specific scheduler helpers to avoid
recursion vs rtlock on the PI state.

[[ peterz: adapted to new names ]]

Reported-by: Crystal Wood <swood@...hat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
 kernel/locking/rtmutex.c     | 14 ++++++++++++--
 kernel/locking/rwbase_rt.c   |  2 ++
 kernel/locking/rwsem.c       |  8 +++++++-
 kernel/locking/spinlock_rt.c |  4 ++++
 4 files changed, 25 insertions(+), 3 deletions(-)
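
[ Note: the rt_mutex_{pre,post,}schedule() helpers used below are
  introduced earlier in this series. A rough sketch of their intent --
  bodies abbreviated and lockdep bookkeeping omitted, see
  kernel/sched/core.c for the real thing:

	void rt_mutex_pre_schedule(void)
	{
		/*
		 * Flush pending work (e.g. block plugs) *before* the
		 * rtmutex slow path queues a waiter and sets up PI.
		 */
		sched_submit_work(current);
	}

	void rt_mutex_schedule(void)
	{
		/* Block without redoing the submit work above. */
		__schedule_loop(SM_NONE);
	}

	void rt_mutex_post_schedule(void)
	{
		/* Undo rt_mutex_pre_schedule(), e.g. resume workers. */
		sched_update_worker(current);
	}
]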
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1636,7 +1636,7 @@ static int __sched rt_mutex_slowlock_blo
 		raw_spin_unlock_irq(&lock->wait_lock);
 
 		if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
-			schedule();
+			rt_mutex_schedule();
 
 		raw_spin_lock_irq(&lock->wait_lock);
 		set_current_state(state);
@@ -1665,7 +1665,7 @@ static void __sched rt_mutex_handle_dead
 	WARN(1, "rtmutex deadlock detected\n");
 	while (1) {
 		set_current_state(TASK_INTERRUPTIBLE);
-		schedule();
+		rt_mutex_schedule();
 	}
 }
 
@@ -1761,6 +1761,15 @@ static int __sched rt_mutex_slowlock(str
 	int ret;
 
 	/*
+	 * Do all pre-schedule work here, before we queue a waiter and invoke
+	 * PI -- any such work that trips on rtlock (PREEMPT_RT spinlock) would
+	 * otherwise recurse back into task_blocks_on_rt_mutex() through
+	 * rtlock_slowlock() and will then enqueue a second waiter for this
+	 * same task and things get really confusing real fast.
+	 */
+	rt_mutex_pre_schedule();
+
+	/*
 	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
 	 * be called in early boot if the cmpxchg() fast path is disabled
 	 * (debug, no architecture support). In this case we will acquire the
@@ -1771,6 +1780,7 @@ static int __sched rt_mutex_slowlock(str
 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 	ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state);
 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+	rt_mutex_post_schedule();
 
 	return ret;
 }
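
[ Note: to make the recursion described in the new comment concrete --
  with blk_flush_plug() as an assumed example of pre-schedule work that
  takes a PREEMPT_RT spinlock -- the chain being avoided is:

	rt_mutex_slowlock()
	  task_blocks_on_rt_mutex()           // waiter #1 queued, PI set up
	  schedule()
	    sched_submit_work()
	      blk_flush_plug()
	        spin_lock(...)                // PREEMPT_RT spinlock == rtlock
	          rtlock_slowlock()
	            task_blocks_on_rt_mutex() // waiter #2 for the same task

  Doing rt_mutex_pre_schedule() before the waiter is queued, and using
  rt_mutex_schedule() (which skips sched_submit_work()) while blocked,
  breaks this chain.
]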
--- a/kernel/locking/rwbase_rt.c
+++ b/kernel/locking/rwbase_rt.c
@@ -71,6 +71,7 @@ static int __sched __rwbase_read_lock(st
 	struct rt_mutex_base *rtm = &rwb->rtmutex;
 	int ret;
 
+	rwbase_pre_schedule();
 	raw_spin_lock_irq(&rtm->wait_lock);
 
 	/*
@@ -125,6 +126,7 @@ static int __sched __rwbase_read_lock(st
 	rwbase_rtmutex_unlock(rtm);
 
 	trace_contention_end(rwb, ret);
+	rwbase_post_schedule();
 	return ret;
 }
 
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -1427,8 +1427,14 @@ static inline void __downgrade_write(str
 #define rwbase_signal_pending_state(state, current)	\
 	signal_pending_state(state, current)
 
+#define rwbase_pre_schedule()				\
+	rt_mutex_pre_schedule()
+
 #define rwbase_schedule()				\
-	schedule()
+	rt_mutex_schedule()
+
+#define rwbase_post_schedule()				\
+	rt_mutex_post_schedule()
 
 #include "rwbase_rt.c"
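
[ Note: these definitions live in the CONFIG_PREEMPT_RT part of rwsem.c,
  where rwsem is a thin wrapper around the rwbase_rt template and the
  #include stamps out the generic slow paths with the hooks above
  substituted. Simplified (not the exact code), a contended down_read()
  reaches them via:

	/* rwbase_rt.c template, instantiated into rwsem.c: */
	static __always_inline int rwbase_read_lock(struct rwbase_rt *rwb,
						    unsigned int state)
	{
		if (rwbase_read_trylock(rwb))
			return 0;	/* fast path, no scheduling */

		return __rwbase_read_lock(rwb, state);	/* uses the hooks */
	}
]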
--- a/kernel/locking/spinlock_rt.c
+++ b/kernel/locking/spinlock_rt.c
@@ -184,9 +184,13 @@ static __always_inline int rwbase_rtmut
 
 #define rwbase_signal_pending_state(state, current)	(0)
 
+#define rwbase_pre_schedule()
+
 #define rwbase_schedule()				\
 	schedule_rtlock()
 
+#define rwbase_post_schedule()
+
 #include "rwbase_rt.c"
 /*
  * The common functions which get wrapped into the rwlock API.
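
[ Note: the rtlock variant deliberately leaves the pre/post hooks empty:
  rtlock is exactly the lock that pre-schedule work may itself take, and
  schedule_rtlock() does not do the submit-work/update-worker bracket
  that schedule() does, so there is nothing to flush or restore here.
  For comparison, a sketch assuming the __schedule_loop() extraction
  from earlier in this series:

	/* kernel/sched/core.c */
	void __sched notrace schedule_rtlock(void)
	{
		/* Block for a rtlock; no sched_submit_work() bracket. */
		__schedule_loop(SM_RTLOCK_WAIT);
	}
]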