Message-ID: <20210815211305.967526724@linutronix.de>
Date: Sun, 15 Aug 2021 23:29:27 +0200 (CEST)
From: Thomas Gleixner <tglx@...utronix.de>
To: LKML <linux-kernel@...r.kernel.org>
Cc: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...nel.org>,
Juri Lelli <juri.lelli@...hat.com>,
Steven Rostedt <rostedt@...dmis.org>,
Daniel Bristot de Oliveira <bristot@...hat.com>,
Will Deacon <will@...nel.org>,
Waiman Long <longman@...hat.com>,
Boqun Feng <boqun.feng@...il.com>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Davidlohr Bueso <dave@...olabs.net>,
Mike Galbraith <efault@....de>
Subject: [patch V5 71/72] locking/spinlock/rt: Prepare for RT local_lock

Add the static and runtime initializer mechanics to support the RT variant
of local_lock, which requires the lock type in the lockdep map to be set
to LD_LOCK_PERCPU.
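
As a sketch of the intended consumer (illustration only, not part of this
patch; the names follow the existing local_lock API in
include/linux/local_lock_internal.h and the follow-up RT local_lock patch):
on PREEMPT_RT a local_lock_t becomes a spinlock_t and is wired up to the
new percpu-aware initializers like this:

   /* Illustrative sketch: RT mapping of local_lock onto the new helpers */
   typedef spinlock_t local_lock_t;

   #define INIT_LOCAL_LOCK(lockname)	__LOCAL_SPIN_LOCK_UNLOCKED((lockname))

   #define __local_lock_init(l)				\
   do {							\
	   local_spin_lock_init((l));			\
   } while (0)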

Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
---
V5: New patch
---
 include/linux/spinlock_rt.h        |   24 ++++++++++++++++--------
 include/linux/spinlock_types.h     |    6 ++++++
 include/linux/spinlock_types_raw.h |    8 ++++++++
 kernel/locking/spinlock_rt.c       |    7 +++++--
 4 files changed, 35 insertions(+), 10 deletions(-)

--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -8,20 +8,28 @@

 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
-				struct lock_class_key *key);
+				struct lock_class_key *key, bool percpu);
 #else
 static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
-					struct lock_class_key *key)
+					struct lock_class_key *key, bool percpu)
 {
 }
 #endif

-#define spin_lock_init(slock)					\
-do {								\
-	static struct lock_class_key __key;			\
-								\
-	rt_mutex_base_init(&(slock)->lock);			\
-	__rt_spin_lock_init(slock, #slock, &__key);		\
+#define spin_lock_init(slock)					\
+do {								\
+	static struct lock_class_key __key;			\
+								\
+	rt_mutex_base_init(&(slock)->lock);			\
+	__rt_spin_lock_init(slock, #slock, &__key, false);	\
+} while (0)
+
+#define local_spin_lock_init(slock)				\
+do {								\
+	static struct lock_class_key __key;			\
+								\
+	rt_mutex_base_init(&(slock)->lock);			\
+	__rt_spin_lock_init(slock, #slock, &__key, true);	\
 } while (0)

 extern void rt_spin_lock(spinlock_t *lock);

--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -60,6 +60,12 @@ typedef struct spinlock {
 		SPIN_DEP_MAP_INIT(name)				\
 	}

+#define __LOCAL_SPIN_LOCK_UNLOCKED(name)			\
+	{							\
+		.lock = __RT_MUTEX_BASE_INITIALIZER(name.lock),	\
+		LOCAL_SPIN_DEP_MAP_INIT(name)			\
+	}
+
 #define DEFINE_SPINLOCK(name)					\
 	spinlock_t name = __SPIN_LOCK_UNLOCKED(name)


--- a/include/linux/spinlock_types_raw.h
+++ b/include/linux/spinlock_types_raw.h
@@ -37,9 +37,17 @@ typedef struct raw_spinlock {
 		.name = #lockname,			\
 		.wait_type_inner = LD_WAIT_CONFIG,	\
 	}
+
+# define LOCAL_SPIN_DEP_MAP_INIT(lockname)		\
+	.dep_map = {					\
+		.name = #lockname,			\
+		.wait_type_inner = LD_WAIT_CONFIG,	\
+		.lock_type = LD_LOCK_PERCPU,		\
+	}
 #else
 # define RAW_SPIN_DEP_MAP_INIT(lockname)
 # define SPIN_DEP_MAP_INIT(lockname)
+# define LOCAL_SPIN_DEP_MAP_INIT(lockname)
 #endif

 #ifdef CONFIG_DEBUG_SPINLOCK

--- a/kernel/locking/spinlock_rt.c
+++ b/kernel/locking/spinlock_rt.c
@@ -120,10 +120,13 @@ EXPORT_SYMBOL(rt_spin_trylock_bh);

 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void __rt_spin_lock_init(spinlock_t *lock, const char *name,
-			 struct lock_class_key *key)
+			 struct lock_class_key *key, bool percpu)
 {
+	u8 type = percpu ? LD_LOCK_PERCPU : LD_LOCK_NORMAL;
+
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
-	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG);
+	lockdep_init_map_type(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG,
+			      LD_WAIT_INV, type);
 }
 EXPORT_SYMBOL(__rt_spin_lock_init);
 #endif
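
Note (illustration only, not part of the patch): with the static side in
place, a per-CPU spinlock can be initialized at build time with its lockdep
map typed as LD_LOCK_PERCPU, which lets lockdep tell such per-CPU locks
apart from ordinary spinlocks. 'demo_lock' below is a made-up name:

   /* Hypothetical usage sketch of the new static initializer */
   static DEFINE_PER_CPU(spinlock_t, demo_lock) =
	   __LOCAL_SPIN_LOCK_UNLOCKED(demo_lock);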