Add a lockdep annotation that WARNs if you 'accidentally' unlock a lock.

This is especially helpful for code with callbacks, where the upper
layer assumes a lock remains taken, but a lower layer thinks it can
maybe drop and reacquire the lock. By unwittingly breaking up the lock
like that, races can be introduced.

Lock pinning is a lockdep annotation that helps with this: once you
lockdep_pin_lock() a held lock, any unlock without a matching
lockdep_unpin_lock() will produce a WARN. Think of this as a relative
of lockdep_assert_held(); except you don't only assert that the lock is
held right now, you also ensure it stays held until you release your
assertion.

RFC: a possible alternative API would be something like:

	int cookie = lockdep_pin_lock(&foo);
	...
	lockdep_unpin_lock(&foo, cookie);

where we pick a random number for the pin_count; this makes it
impossible to sneak a lock break in without also passing the right
cookie along.

I've not done this because it ends up generating code for !LOCKDEP,
especially if you need to pass the cookie around for some reason.
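To make the intended usage concrete, a minimal sketch (not part of this
patch): rq->lock is borrowed from the scheduler as an example pinnable
lock, and do_the_callback() is a made-up lower-layer hook. Since
pin_count is a plain counter, pins nest; pinning twice requires
unpinning twice.

	/*
	 * We hold rq->lock across the callback and want lockdep to WARN
	 * if the callee (perhaps unwittingly) drops and reacquires it.
	 */
	static void rq_run_callback(struct rq *rq)
	{
		lockdep_assert_held(&rq->lock);	/* must be held here... */

		lockdep_pin_lock(&rq->lock);	/* ...and any unlock now WARNs */
		do_the_callback(rq);
		lockdep_unpin_lock(&rq->lock);	/* unlocking is allowed again */
	}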
Signed-off-by: Peter Zijlstra (Intel)
---
 include/linux/lockdep.h  |   10 +++++
 kernel/locking/lockdep.c |   80 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 90 insertions(+)

--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -255,6 +255,7 @@ struct held_lock {
 	unsigned int check:1;       /* see lock_acquire() comment */
 	unsigned int hardirqs_off:1;
 	unsigned int references:12;					/* 32 bits */
+	unsigned int pin_count;
 };
 
 /*
@@ -354,6 +355,9 @@ extern void lockdep_set_current_reclaim_
 extern void lockdep_clear_current_reclaim_state(void);
 extern void lockdep_trace_alloc(gfp_t mask);
 
+extern void lock_pin_lock(struct lockdep_map *lock);
+extern void lock_unpin_lock(struct lockdep_map *lock);
+
 # define INIT_LOCKDEP				.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
 
 #define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
@@ -368,6 +372,9 @@ extern void lockdep_trace_alloc(gfp_t ma
 
 #define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)
 
+#define lockdep_pin_lock(l)		lock_pin_lock(&(l)->dep_map)
+#define lockdep_unpin_lock(l)		lock_unpin_lock(&(l)->dep_map)
+
 #else /* !CONFIG_LOCKDEP */
 
 static inline void lockdep_off(void)
@@ -420,6 +427,9 @@ struct lock_class_key { };
 
 #define lockdep_recursing(tsk)			(0)
 
+#define lockdep_pin_lock(l)			do { (void)(l); } while (0)
+#define lockdep_unpin_lock(l)			do { (void)(l); } while (0)
+
 #endif /* !LOCKDEP */
 
 #ifdef CONFIG_LOCK_STAT
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3157,6 +3157,7 @@ static int __lock_acquire(struct lockdep
 	hlock->waittime_stamp = 0;
 	hlock->holdtime_stamp = lockstat_clock();
 #endif
+	hlock->pin_count = 0;
 
 	if (check && !mark_irqflags(curr, hlock))
 		return 0;
@@ -3403,6 +3404,8 @@ __lock_release(struct lockdep_map *lock,
 	if (hlock->instance == lock)
 		lock_release_holdtime(hlock);
 
+	WARN(hlock->pin_count, "releasing a pinned lock\n");
+
 	if (hlock->references) {
 		hlock->references--;
 		if (hlock->references) {
@@ -3459,6 +3462,49 @@ static int __lock_is_held(struct lockdep
 	return 0;
 }
 
+static void __lock_pin_lock(struct lockdep_map *lock)
+{
+	struct task_struct *curr = current;
+	int i;
+
+	if (unlikely(!debug_locks))
+		return;
+
+	for (i = 0; i < curr->lockdep_depth; i++) {
+		struct held_lock *hlock = curr->held_locks + i;
+
+		if (match_held_lock(hlock, lock)) {
+			hlock->pin_count++;
+			return;
+		}
+	}
+
+	WARN(1, "pinning an unheld lock\n");
+}
+
+static void __lock_unpin_lock(struct lockdep_map *lock)
+{
+	struct task_struct *curr = current;
+	int i;
+
+	if (unlikely(!debug_locks))
+		return;
+
+	for (i = 0; i < curr->lockdep_depth; i++) {
+		struct held_lock *hlock = curr->held_locks + i;
+
+		if (match_held_lock(hlock, lock)) {
+			if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
+				return;
+
+			hlock->pin_count--;
+			return;
+		}
+	}
+
+	WARN(1, "unpinning an unheld lock\n");
+}
+
 /*
  * Check whether we follow the irq-flags state precisely:
  */
@@ -3582,6 +3628,40 @@ int lock_is_held(struct lockdep_map *loc
 }
 EXPORT_SYMBOL_GPL(lock_is_held);
 
+void lock_pin_lock(struct lockdep_map *lock)
+{
+	unsigned long flags;
+
+	if (unlikely(current->lockdep_recursion))
+		return;
+
+	raw_local_irq_save(flags);
+	check_flags(flags);
+
+	current->lockdep_recursion = 1;
+	__lock_pin_lock(lock);
+	current->lockdep_recursion = 0;
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_pin_lock);
+
+void lock_unpin_lock(struct lockdep_map *lock)
+{
+	unsigned long flags;
+
+	if (unlikely(current->lockdep_recursion))
+		return;
+
+	raw_local_irq_save(flags);
+	check_flags(flags);
+
+	current->lockdep_recursion = 1;
+	__lock_unpin_lock(lock);
+	current->lockdep_recursion = 0;
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_unpin_lock);
+
 void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
 {
 	current->lockdep_reclaim_gfp = gfp_mask;