Message-ID: <20190222162622.GB32494@hirez.programming.kicks-ass.net>
Date: Fri, 22 Feb 2019 17:26:22 +0100
From: Peter Zijlstra <peterz@...radead.org>
To: Bart Van Assche <bvanassche@....org>
Cc: mingo@...hat.com, will.deacon@....com, tj@...nel.org,
longman@...hat.com, johannes.berg@...el.com,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v7 00/23] locking/lockdep: Add support for dynamic keys
On Thu, Feb 21, 2019 at 02:02:05PM -0800, Bart Van Assche wrote:
> On Thu, 2019-02-14 at 15:00 -0800, Bart Van Assche wrote:
> > A known shortcoming of the current lockdep implementation is that it requires
> > lock keys to be allocated statically. This forces certain unrelated
> > synchronization objects to share keys and this key sharing can cause false
> > positive deadlock reports. This patch series adds support for dynamic keys in
> > the lockdep code and eliminates a class of false positive reports from the
> > workqueue implementation.
> >
> > Please consider these patches for kernel v5.1.
>
> Hi Peter and Ingo,
>
> Do you have any feedback about this patch series that you would like to share?
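(Recap for anyone skimming the thread: the point of the series is that a
lock_class_key no longer has to live in static storage. Roughly -- struct
foo below is made up, the lockdep_{register,unregister}_key() calls are the
new interface this series adds -- usage looks like:

	struct foo {
		spinlock_t lock;
		struct lock_class_key key;	/* in dynamically allocated memory */
	};

	struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);

	lockdep_register_key(&p->key);		/* make the key known to lockdep */
	spin_lock_init(&p->lock);
	lockdep_set_class(&p->lock, &p->key);	/* give this lock its own class */
	...
	lockdep_unregister_key(&p->key);	/* must happen before kfree(p) */

so each object can get its own key instead of sharing a static one.)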
I've gone over it all and I think it looks ok now; I'll give it another
round tomorrow^Wmonday and then queue the bits.

So far the only changes I've made are the ones below. I'm not entirely sure
about making the validity check unconditional under DEBUG_LOCKDEP; maybe
I'll add a boot param for that.
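Something like this, perhaps (a minimal, untested sketch; the
"lockdep_validate" name is made up):

	static bool lockdep_validate __ro_after_init;

	static int __init lockdep_validate_setup(char *str)
	{
		/* Opt in to the expensive consistency checks at boot. */
		lockdep_validate = true;
		return 1;
	}
	__setup("lockdep_validate", lockdep_validate_setup);

and then __free_zapped_classes() would do:

	if (lockdep_validate)
		WARN_ON_ONCE(!check_data_structures());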
---
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -75,8 +75,6 @@ module_param(lock_stat, int, 0644);
 #define lock_stat 0
 #endif

-static bool check_data_structure_consistency;
-
 /*
  * lockdep_lock: protects the lockdep graph, the hashes and the
  * class/list/hash allocators.
@@ -792,6 +790,8 @@ static bool assign_lock_key(struct lockd
 	return true;
 }

+#ifdef CONFIG_DEBUG_LOCKDEP
+
 /* Check whether element @e occurs in list @h */
 static bool in_list(struct list_head *e, struct list_head *h)
 {
@@ -856,15 +856,15 @@ static bool check_lock_chain_key(struct
 	 * The 'unsigned long long' casts avoid that a compiler warning
 	 * is reported when building tools/lib/lockdep.
 	 */
-	if (chain->chain_key != chain_key)
+	if (chain->chain_key != chain_key) {
 		printk(KERN_INFO "chain %lld: key %#llx <> %#llx\n",
 		       (unsigned long long)(chain - lock_chains),
 		       (unsigned long long)chain->chain_key,
 		       (unsigned long long)chain_key);
-	return chain->chain_key == chain_key;
-#else
-	return true;
+		return false;
+	}
 #endif
+	return true;
 }

 static bool in_any_zapped_class_list(struct lock_class *class)
@@ -872,10 +872,10 @@ static bool in_any_zapped_class_list(str
 	struct pending_free *pf;
 	int i;

-	for (i = 0, pf = delayed_free.pf; i < ARRAY_SIZE(delayed_free.pf);
-	     i++, pf++)
+	for (i = 0, pf = delayed_free.pf; i < ARRAY_SIZE(delayed_free.pf); i++, pf++) {
 		if (in_list(&class->lock_entry, &pf->zapped))
 			return true;
+	}

 	return false;
 }
@@ -897,7 +897,6 @@ static bool check_data_structures(void)
 			printk(KERN_INFO "class %px/%s is not in any class list\n",
 			       class, class->name ? : "(?)");
 			return false;
-			return false;
 		}
 	}

@@ -954,6 +953,12 @@ static bool check_data_structures(void)
 	return true;
 }

+#else /* CONFIG_DEBUG_LOCKDEP */
+
+static inline bool check_data_structures(void) { return true; }
+
+#endif /* CONFIG_DEBUG_LOCKDEP */
+
 /*
  * Initialize the lock_classes[] array elements, the free_lock_classes list
  * and also the delayed_free structure.
@@ -4480,10 +4485,11 @@ static void remove_class_from_lock_chain
 		if (chain_hlocks[i] != class - lock_classes)
 			continue;
 		/* The code below leaks one chain_hlock[] entry. */
-		if (--chain->depth > 0)
+		if (--chain->depth > 0) {
 			memmove(&chain_hlocks[i], &chain_hlocks[i + 1],
 				(chain->base + chain->depth - i) *
 				sizeof(chain_hlocks[0]));
+		}
 		/*
 		 * Each lock class occurs at most once in a lock chain so once
 		 * we found a match we can break out of this loop.
@@ -4637,8 +4643,7 @@ static void __free_zapped_classes(struct
 {
 	struct lock_class *class;

-	if (check_data_structure_consistency)
-		WARN_ON_ONCE(!check_data_structures());
+	WARN_ON_ONCE(!check_data_structures());

 	list_for_each_entry(class, &pf->zapped, lock_entry)
 		reinit_class(class);