Message-ID: <172976262647.1442.12633197284362217278.tip-bot2@tip-bot2>
Date: Thu, 24 Oct 2024 09:37:06 -0000
From: "tip-bot2 for Sebastian Andrzej Siewior" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Thomas Gleixner <tglx@...utronix.de>, x86@...nel.org,
linux-kernel@...r.kernel.org
Subject: [tip: locking/core] locking/rt: Add sparse annotation for RCU.

The following commit has been merged into the locking/core branch of tip:

Commit-ID: 168660b826a77fda28235e0b0b3027041d6a5240
Gitweb: https://git.kernel.org/tip/168660b826a77fda28235e0b0b3027041d6a5240
Author: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
AuthorDate: Mon, 12 Aug 2024 12:39:04 +02:00
Committer: Thomas Gleixner <tglx@...utronix.de>
CommitterDate: Thu, 24 Oct 2024 11:27:02 +02:00

locking/rt: Add sparse annotation for RCU.

Every lock that becomes a sleeping lock on PREEMPT_RT starts an RCU
read-side critical section. There is no sparse annotation for this, so
sparse complains about unbalanced locking.

Add __acquires()/__releases() annotations for the RCU lock. This covers
all but the trylock functions; a __cond_acquires() annotation didn't
work.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Link: https://lore.kernel.org/all/20240812104200.2239232-4-bigeasy@linutronix.de

---
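A minimal sketch of what the annotation means to sparse, assuming the
__CHECKER__ definitions from include/linux/compiler_types.h (the
my_lock()/my_unlock() helpers below are hypothetical and only
illustrate the checker's view, they are not part of this patch):

  /* Under __CHECKER__ the kernel defines, roughly: */
  #define __acquires(x)  __attribute__((context(x, 0, 1)))
  #define __releases(x)  __attribute__((context(x, 1, 0)))

  /* An __acquires() function enters with the context count for "x"
   * at 0 and exits with it at 1; __releases() is the reverse. */
  void my_lock(void) __acquires(RCU);
  void my_unlock(void) __releases(RCU);

  static void user(void)
  {
          my_lock();      /* RCU context count 0 -> 1 */
          my_unlock();    /* RCU context count 1 -> 0: balanced */
  }       /* returning with a nonzero count would make sparse warn
           * about a context imbalance ("wrong count at exit") */

A trylock acquires only on success, so the unconditional __acquires()
form does not fit the trylock functions, which stay unannotated here.
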
 kernel/locking/spinlock_rt.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/kernel/locking/spinlock_rt.c b/kernel/locking/spinlock_rt.c
index 38e2924..d1cf8b2 100644
--- a/kernel/locking/spinlock_rt.c
+++ b/kernel/locking/spinlock_rt.c
@@ -51,7 +51,7 @@ static __always_inline void __rt_spin_lock(spinlock_t *lock)
migrate_disable();
}

-void __sched rt_spin_lock(spinlock_t *lock)
+void __sched rt_spin_lock(spinlock_t *lock) __acquires(RCU)
{
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
__rt_spin_lock(lock);
@@ -75,7 +75,7 @@ void __sched rt_spin_lock_nest_lock(spinlock_t *lock,
EXPORT_SYMBOL(rt_spin_lock_nest_lock);
#endif

-void __sched rt_spin_unlock(spinlock_t *lock)
+void __sched rt_spin_unlock(spinlock_t *lock) __releases(RCU)
{
spin_release(&lock->dep_map, _RET_IP_);
migrate_enable();
@@ -225,7 +225,7 @@ int __sched rt_write_trylock(rwlock_t *rwlock)
}
EXPORT_SYMBOL(rt_write_trylock);

-void __sched rt_read_lock(rwlock_t *rwlock)
+void __sched rt_read_lock(rwlock_t *rwlock) __acquires(RCU)
{
rtlock_might_resched();
rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
@@ -235,7 +235,7 @@ void __sched rt_read_lock(rwlock_t *rwlock)
}
EXPORT_SYMBOL(rt_read_lock);

-void __sched rt_write_lock(rwlock_t *rwlock)
+void __sched rt_write_lock(rwlock_t *rwlock) __acquires(RCU)
{
rtlock_might_resched();
rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
@@ -246,7 +246,7 @@ void __sched rt_write_lock(rwlock_t *rwlock)
EXPORT_SYMBOL(rt_write_lock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
-void __sched rt_write_lock_nested(rwlock_t *rwlock, int subclass)
+void __sched rt_write_lock_nested(rwlock_t *rwlock, int subclass) __acquires(RCU)
{
rtlock_might_resched();
rwlock_acquire(&rwlock->dep_map, subclass, 0, _RET_IP_);
@@ -257,7 +257,7 @@ void __sched rt_write_lock_nested(rwlock_t *rwlock, int subclass)
EXPORT_SYMBOL(rt_write_lock_nested);
#endif

-void __sched rt_read_unlock(rwlock_t *rwlock)
+void __sched rt_read_unlock(rwlock_t *rwlock) __releases(RCU)
{
rwlock_release(&rwlock->dep_map, _RET_IP_);
migrate_enable();
@@ -266,7 +266,7 @@ void __sched rt_read_unlock(rwlock_t *rwlock)
}
EXPORT_SYMBOL(rt_read_unlock);

-void __sched rt_write_unlock(rwlock_t *rwlock)
+void __sched rt_write_unlock(rwlock_t *rwlock) __releases(RCU)
{
rwlock_release(&rwlock->dep_map, _RET_IP_);
rcu_read_unlock();
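
The warnings addressed here show up when running sparse through kbuild,
i.e. "make C=1" to check files being recompiled or "make C=2" to force
a re-check (see Documentation/dev-tools/sparse.rst). With the
annotations in place, each lock/unlock pair keeps the RCU context count
balanced across these functions.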