Message-ID: <20230307203122.14b077c4@gandalf.local.home>
Date: Tue, 7 Mar 2023 20:31:22 -0500
From: Steven Rostedt <rostedt@...dmis.org>
To: John Stultz <jstultz@...gle.com>
Cc: LKML <linux-kernel@...r.kernel.org>, Wei Wang <wvw@...gle.com>,
Midas Chien <midaschieh@...gle.com>,
Kees Cook <keescook@...omium.org>,
Anton Vorontsov <anton@...msg.org>,
"Guilherme G. Piccoli" <gpiccoli@...lia.com>,
Tony Luck <tony.luck@...el.com>, kernel-team@...roid.com,
Thomas Gleixner <tglx@...utronix.de>,
Peter Zijlstra <peterz@...radead.org>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Subject: Re: [PATCH] pstore: Revert pmsg_lock back to a normal mutex

On Thu, 2 Mar 2023 20:01:36 -0500
Steven Rostedt <rostedt@...dmis.org> wrote:
> @@ -1421,11 +1425,23 @@ static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
> * for CONFIG_PREEMPT_RCU=y)
> * - the VCPU on which owner runs is preempted
> */
> - if (!owner_on_cpu(owner) || need_resched() ||
> - !rt_mutex_waiter_is_top_waiter(lock, waiter)) {
> + if (!owner_on_cpu(owner) || need_resched()) {
> res = false;
> break;
> }
> + top_waiter = rt_mutex_top_waiter(lock);
rt_mutex_top_waiter() cannot be called without holding the wait_lock, as
doing so may trigger that BUG_ON() you saw.
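
For reference, the helper looks roughly like this (paraphrased from
kernel/locking/rtmutex_common.h; the exact body may differ between kernel
versions):

static __always_inline struct rt_mutex_waiter *
rt_mutex_top_waiter(struct rt_mutex_base *lock)
{
	struct rt_mutex_waiter *w = NULL;

	/* The waiter rbtree is only stable while wait_lock is held */
	lockdep_assert_held(&lock->wait_lock);

	if (rt_mutex_has_waiters(lock)) {
		w = rb_entry(lock->waiters.rb_leftmost,
			     struct rt_mutex_waiter, tree_entry);
		/* A racing dequeue can leave w pointing at stale memory */
		BUG_ON(w->lock != lock);
	}
	return w;
}

Without wait_lock held, rb_leftmost can change underneath us (or point at a
waiter that has already dequeued itself), which is why the patch below only
reads the top waiter while holding wait_lock and caches the result for the
spin loop.
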
New patch below.
> + if (top_waiter != waiter) {
> + if (top_waiter != last_waiter) {
> + raw_spin_lock_irq(&lock->wait_lock);
> + last_waiter = rt_mutex_top_waiter(lock);
> + top_task = last_waiter->task;
> + raw_spin_unlock_irq(&lock->wait_lock);
> + }
> + if (!owner_on_cpu(top_task)) {
> + res = false;
> + break;
> + }
> + }
-- Steve
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 7779ee8abc2a..f7b0cc3be20e 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1362,8 +1362,11 @@ static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *owner)
{
+ struct rt_mutex_waiter *top_waiter = NULL;
+ struct task_struct *top_task = NULL;
bool res = true;
+ /* rcu_read_lock keeps task_structs around */
rcu_read_lock();
for (;;) {
/* If owner changed, trylock again. */
@@ -1384,11 +1387,22 @@ static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
* for CONFIG_PREEMPT_RCU=y)
* - the VCPU on which owner runs is preempted
*/
- if (!owner_on_cpu(owner) || need_resched() ||
- !rt_mutex_waiter_is_top_waiter(lock, waiter)) {
+ if (!owner_on_cpu(owner) || need_resched()) {
res = false;
break;
}
+ if (!rt_mutex_waiter_is_top_waiter(lock, waiter)) {
+ if (!rt_mutex_waiter_is_top_waiter(lock, top_waiter)) {
+ raw_spin_lock_irq(&lock->wait_lock);
+ top_waiter = rt_mutex_top_waiter(lock);
+ top_task = top_waiter->task;
+ raw_spin_unlock_irq(&lock->wait_lock);
+ }
+ if (!owner_on_cpu(top_task)) {
+ res = false;
+ break;
+ }
+ }
cpu_relax();
}
rcu_read_unlock();
@@ -1510,10 +1524,7 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
break;
}
- if (waiter == rt_mutex_top_waiter(lock))
- owner = rt_mutex_owner(lock);
- else
- owner = NULL;
+ owner = rt_mutex_owner(lock);
raw_spin_unlock_irq(&lock->wait_lock);
if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
@@ -1699,10 +1710,7 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
if (try_to_take_rt_mutex(lock, current, &waiter))
break;
- if (&waiter == rt_mutex_top_waiter(lock))
- owner = rt_mutex_owner(lock);
- else
- owner = NULL;
+ owner = rt_mutex_owner(lock);
raw_spin_unlock_irq(&lock->wait_lock);
if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))