Message-ID: <20250527222254.565881-15-lyude@redhat.com>
Date: Tue, 27 May 2025 18:21:55 -0400
From: Lyude Paul <lyude@...hat.com>
To: rust-for-linux@...r.kernel.org,
Thomas Gleixner <tglx@...utronix.de>,
Boqun Feng <boqun.feng@...il.com>,
linux-kernel@...r.kernel.org,
Daniel Almeida <daniel.almeida@...labora.com>
Cc: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Will Deacon <will@...nel.org>,
Waiman Long <longman@...hat.com>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Clark Williams <clrkwllms@...nel.org>,
Steven Rostedt <rostedt@...dmis.org>,
linux-rt-devel@...ts.linux.dev (open list:Real-time Linux (PREEMPT_RT):Keyword:PREEMPT_RT)
Subject: [RFC RESEND v10 14/14] locking: Switch to _irq_{disable,enable}() variants in cleanup guards

From: Boqun Feng <boqun.feng@...il.com>

The semantics of the various irq-disabling guards match what the
*_irq_{disable,enable}() primitives provide, i.e. the interrupt
disabling is properly nested, therefore it is fine to switch the guards
over to the *_irq_{disable,enable}() primitives.

Signed-off-by: Boqun Feng <boqun.feng@...il.com>
---
V10:
* Add PREEMPT_RT build fix from Guangbo Cui
Signed-off-by: Lyude Paul <lyude@...hat.com>
---
 include/linux/spinlock.h    | 26 ++++++++++++--------------
 include/linux/spinlock_rt.h |  6 ++++++
 2 files changed, 18 insertions(+), 14 deletions(-)
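
A quick usage sketch, not part of the patch itself, of what the
conversion means for guard users: the irqsave guard types lose their
'unsigned long flags' member, while the scope-based API stays the same.
The names 'struct foo' and foo_inc() below are made up for
illustration, and the sketch assumes the spin_lock_irq_disable() /
spin_unlock_irq_enable() primitives introduced earlier in this series.

#include <linux/cleanup.h>
#include <linux/spinlock.h>

/* 'struct foo' is hypothetical, used only for this example. */
struct foo {
	spinlock_t lock;
	unsigned long count;
};

static void foo_inc(struct foo *f)
{
	/*
	 * Usage is unchanged; with this patch the guard takes the lock
	 * via spin_lock_irq_disable() and drops it via
	 * spin_unlock_irq_enable() when the scope ends, instead of
	 * saving and restoring flags internally.
	 */
	guard(spinlock_irqsave)(&f->lock);
	f->count++;
}

The same holds for the raw_spinlock_irq, raw_spinlock_irqsave and
spinlock_irq guards converted below: only the guard definitions change,
not their users.
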
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index b21da4bd51a42..7ff11c893940b 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -605,10 +605,10 @@ DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
 		    raw_spin_unlock(_T->lock))

 DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
-		    raw_spin_lock_irq(_T->lock),
-		    raw_spin_unlock_irq(_T->lock))
+		    raw_spin_lock_irq_disable(_T->lock),
+		    raw_spin_unlock_irq_enable(_T->lock))

-DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock))
+DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq_disable(_T->lock))

 DEFINE_LOCK_GUARD_1(raw_spinlock_bh, raw_spinlock_t,
 		    raw_spin_lock_bh(_T->lock),
@@ -617,12 +617,11 @@ DEFINE_LOCK_GUARD_1(raw_spinlock_bh, raw_spinlock_t,
 DEFINE_LOCK_GUARD_1_COND(raw_spinlock_bh, _try, raw_spin_trylock_bh(_T->lock))

 DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
-		    raw_spin_lock_irqsave(_T->lock, _T->flags),
-		    raw_spin_unlock_irqrestore(_T->lock, _T->flags),
-		    unsigned long flags)
+		    raw_spin_lock_irq_disable(_T->lock),
+		    raw_spin_unlock_irq_enable(_T->lock))

 DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try,
-			 raw_spin_trylock_irqsave(_T->lock, _T->flags))
+			 raw_spin_trylock_irq_disable(_T->lock))

 DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
 		    spin_lock(_T->lock),
@@ -631,11 +630,11 @@ DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
 DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))

 DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
-		    spin_lock_irq(_T->lock),
-		    spin_unlock_irq(_T->lock))
+		    spin_lock_irq_disable(_T->lock),
+		    spin_unlock_irq_enable(_T->lock))

 DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try,
-			 spin_trylock_irq(_T->lock))
+			 spin_trylock_irq_disable(_T->lock))

 DEFINE_LOCK_GUARD_1(spinlock_bh, spinlock_t,
 		    spin_lock_bh(_T->lock),
@@ -645,12 +644,11 @@ DEFINE_LOCK_GUARD_1_COND(spinlock_bh, _try,
 			 spin_trylock_bh(_T->lock))

 DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
-		    spin_lock_irqsave(_T->lock, _T->flags),
-		    spin_unlock_irqrestore(_T->lock, _T->flags),
-		    unsigned long flags)
+		    spin_lock_irq_disable(_T->lock),
+		    spin_unlock_irq_enable(_T->lock))

 DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try,
-			 spin_trylock_irqsave(_T->lock, _T->flags))
+			 spin_trylock_irq_disable(_T->lock))

 DEFINE_LOCK_GUARD_1(read_lock, rwlock_t,
 		    read_lock(_T->lock),
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
index 6ea08fafa6d7b..f54e184735563 100644
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -132,6 +132,12 @@ static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
 	rt_spin_unlock(lock);
 }

+static __always_inline int spin_trylock_irq_disable(spinlock_t *lock)
+{
+	return rt_spin_trylock(lock);
+}
+
+
 #define spin_trylock(lock)				\
 	__cond_lock(lock, rt_spin_trylock(lock))
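
On PREEMPT_RT, spinlock_t is a sleeping lock and the *_irq() lock
variants do not actually disable hardirqs, so the helper added above
can simply map spin_trylock_irq_disable() to rt_spin_trylock(); without
it, the converted _try guards in spinlock.h would not build on RT (the
V10 fix noted above). A minimal sketch of such a conditional guard,
reusing the hypothetical 'struct foo' from the earlier sketch:

/* foo_try_inc() is hypothetical, for illustration only. */
static int foo_try_inc(struct foo *f)
{
	/* spinlock_irqsave_try now attempts spin_trylock_irq_disable() */
	scoped_cond_guard(spinlock_irqsave_try, return -EBUSY, &f->lock) {
		f->count++;
	}
	return 0;
}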
--
2.49.0