Message-Id: <20190829083132.22394-17-duyuyang@gmail.com>
Date: Thu, 29 Aug 2019 16:31:18 +0800
From: Yuyang Du <duyuyang@...il.com>
To: peterz@...radead.org, will.deacon@....com, mingo@...nel.org
Cc: bvanassche@....org, ming.lei@...hat.com, frederic@...nel.org,
tglx@...utronix.de, linux-kernel@...r.kernel.org,
longman@...hat.com, paulmck@...ux.vnet.ibm.com,
boqun.feng@...il.com, Yuyang Du <duyuyang@...il.com>
Subject: [PATCH v4 16/30] locking/lockdep: Add lock chains to direct lock dependency graph

Lock chains are derived from the current task's lock stack: a lock chain is
a new sequence of locks taken by task or interrupt contexts. Lockdep
validates locking behavior on a lock chain basis, hence the name of its
main function, validate_chain(). A new lock chain may introduce a new
direct dependency, and if so, that new dependency is checked.

Every direct lock dependency appears as the top two locks of at least one
lock chain, and one direct dependency is normally associated with a number
of lock chains.

Given this relationship between lock dependencies and lock chains, this
patch maps each lock chain to its corresponding direct lock dependency,
for example:
	Lock dependency: L1 -> L2
		|
		|--> Lock chain 1: .... L1 -> L2
		|
		|--> Lock chain 2: .... L1 -> L2
		|
		......
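
For orientation, a minimal sketch of the data-structure linkage this
mapping relies on (simplified; field names follow this series, where
lock_list carries entry[2]/class[2] and the chains/chain_entry list heads
are assumed to be introduced by earlier patches in the series; the real
definitions live in include/linux/lockdep.h):

	struct lock_chain {
		/* ... existing fields: irq_context, depth, base, entry, chain_key ... */
		struct list_head chain_entry;	/* node on a dependency's ->chains list */
	};

	struct lock_list {
		/* ... existing fields: entry[2], class[2], trace, distance ... */
		struct list_head chains;	/* all lock chains whose top two
						 * locks form this dependency */
	};

With this linkage, zap_class() can walk entry->chains to find every chain
ending in a dependency being removed, and remove_class_from_lock_chain()
can keep the mapping consistent when a chain is shrunk or replaced.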
Signed-off-by: Yuyang Du <duyuyang@...il.com>
---
kernel/locking/lockdep.c | 85 +++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 81 insertions(+), 4 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 33f8187..754a718 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1046,6 +1046,35 @@ static bool __check_data_structures(void)
e->class[1]->name ? : "(?)");
return false;
}
+
+#ifdef CONFIG_PROVE_LOCKING
+ list_for_each_entry_rcu(chain, &e->chains, chain_entry) {
+ if (!check_lock_chain_key(chain))
+ return false;
+
+ /* <next> lock */
+ class = lock_classes +
+ chain_hlocks[chain->base + chain->depth - 1];
+ if (class != e->class[1]) {
+ printk(KERN_INFO "list entry %d has bad <next> lock; class %s <> %s\n",
+ (unsigned int)(e - list_entries),
+ class->name ? : "(?)",
+ e->class[1]->name ? : "(?)");
+ return false;
+ }
+
+ /* <prev> lock */
+ class = lock_classes +
+ chain_hlocks[chain->base + chain->depth - 2];
+ if (class != e->class[0]) {
+ printk(KERN_INFO "list entry %d has bad <prev> lock; class %s <> %s\n",
+ (unsigned int)(e - list_entries),
+ class->name ? : "(?)",
+ e->class[0]->name ? : "(?)");
+ return false;
+ }
+ }
+#endif
}
/*
@@ -1072,6 +1101,16 @@ static bool __check_data_structures(void)
e->class[1]->name : "(?)");
return false;
}
+
+ if (!list_empty(&e->chains)) {
+ printk(KERN_INFO "list entry %d has nonempty chain list; class %s <> %s\n",
+ (unsigned int)(e - list_entries),
+ e->class[0] && e->class[0]->name ? e->class[0]->name :
+ "(?)",
+ e->class[1] && e->class[1]->name ?
+ e->class[1]->name : "(?)");
+ return false;
+ }
}
return true;
@@ -1128,6 +1167,9 @@ static void init_data_structures_once(void)
INIT_LIST_HEAD(&lock_classes[i].dep_list[0]);
INIT_LIST_HEAD(&lock_classes[i].dep_list[1]);
}
+
+ for (i = 0; i < ARRAY_SIZE(list_entries); i++)
+ INIT_LIST_HEAD(&list_entries[i].chains);
}
static inline struct hlist_head *keyhashentry(const struct lock_class_key *key)
@@ -1302,6 +1344,7 @@ static bool is_dynamic_key(const struct lock_class_key *key)
}
#ifdef CONFIG_PROVE_LOCKING
+static DECLARE_BITMAP(lock_chains_in_dep, MAX_LOCKDEP_CHAINS);
static inline struct
lock_chain *lookup_chain_cache_add(struct task_struct *curr,
struct held_lock *hlock,
@@ -1335,7 +1378,8 @@ static struct lock_list *alloc_list_entry(void)
static int add_lock_to_list(struct lock_class *lock1,
struct lock_class *lock2,
unsigned long ip, int distance,
- const struct lock_trace *trace)
+ const struct lock_trace *trace,
+ struct lock_chain *chain)
{
struct lock_list *entry;
/*
@@ -1359,6 +1403,12 @@ static int add_lock_to_list(struct lock_class *lock1,
list_add_tail_rcu(&entry->entry[1], &lock1->dep_list[1]);
list_add_tail_rcu(&entry->entry[0], &lock2->dep_list[0]);
+ /*
+ * Add the corresponding lock chain to lock_list's chains.
+ */
+ list_add_tail_rcu(&chain->chain_entry, &entry->chains);
+ __set_bit(chain - lock_chains, lock_chains_in_dep);
+
return 1;
}
@@ -2478,6 +2528,9 @@ static inline void inc_chains(void)
if (fw_dep_class(entry) == hlock_class(next)) {
debug_atomic_inc(nr_redundant);
+ list_add_tail_rcu(&chain->chain_entry, &entry->chains);
+ __set_bit(chain - lock_chains, lock_chains_in_dep);
+
if (distance == 1)
entry->distance = 1;
@@ -2524,8 +2577,7 @@ static inline void inc_chains(void)
* dependency list of the previous lock <prev>:
*/
ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
- next->acquire_ip, distance, *trace);
-
+ next->acquire_ip, distance, *trace, chain);
if (!ret)
return 0;
@@ -4783,8 +4835,11 @@ static void remove_class_from_lock_chain(struct pending_free *pf,
* If the modified lock chain matches an existing lock chain, drop
* the modified lock chain.
*/
- if (lookup_chain_cache(chain_key))
+ if (lookup_chain_cache(chain_key)) {
+ if (test_bit(chain - lock_chains, lock_chains_in_dep))
+ list_del_rcu(&chain->chain_entry);
return;
+ }
new_chain = alloc_lock_chain();
if (WARN_ON_ONCE(!new_chain)) {
debug_locks_off();
@@ -4792,6 +4847,10 @@ static void remove_class_from_lock_chain(struct pending_free *pf,
}
*new_chain = *chain;
hlist_add_head_rcu(&new_chain->entry, chainhashentry(chain_key));
+ if (test_bit(chain - lock_chains, lock_chains_in_dep)) {
+ list_replace_rcu(&chain->chain_entry, &new_chain->chain_entry);
+ __set_bit(new_chain - lock_chains, lock_chains_in_dep);
+ }
#endif
}
@@ -4817,6 +4876,7 @@ static void remove_class_from_lock_chains(struct pending_free *pf,
static void zap_class(struct pending_free *pf, struct lock_class *class)
{
struct lock_list *entry;
+ struct lock_chain *chain;
int i;
WARN_ON_ONCE(!class->key);
@@ -4833,6 +4893,20 @@ static void zap_class(struct pending_free *pf, struct lock_class *class)
nr_list_entries--;
list_del_rcu(&entry->entry[0]);
list_del_rcu(&entry->entry[1]);
+
+#ifdef CONFIG_PROVE_LOCKING
+ /*
+ * Destroy the chain list by deleting every chain associated
+ * with this lock_list entry.
+ *
+ * The corresponding lock chains in this lock_list will
+ * be removed later by remove_class_from_lock_chains().
+ */
+ list_for_each_entry_rcu(chain, &entry->chains, chain_entry) {
+ __clear_bit(chain - lock_chains, lock_chains_in_dep);
+ list_del_rcu(&chain->chain_entry);
+ }
+#endif
}
if (list_empty(&class->dep_list[0]) &&
list_empty(&class->dep_list[1])) {
@@ -4919,6 +4993,8 @@ static void __free_zapped_classes(struct pending_free *pf)
#ifdef CONFIG_PROVE_LOCKING
bitmap_andnot(lock_chains_in_use, lock_chains_in_use,
pf->lock_chains_being_freed, ARRAY_SIZE(lock_chains));
+ bitmap_andnot(lock_chains_in_dep, lock_chains_in_dep,
+ pf->lock_chains_being_freed, ARRAY_SIZE(lock_chains));
bitmap_clear(pf->lock_chains_being_freed, 0, ARRAY_SIZE(lock_chains));
#endif
}
@@ -5197,6 +5273,7 @@ void __init lockdep_init(void)
+ sizeof(lock_cq)
+ sizeof(lock_chains)
+ sizeof(lock_chains_in_use)
+ + sizeof(lock_chains_in_dep)
+ sizeof(chain_hlocks)
#endif
) / 1024
--
1.8.3.1