Message-Id: <20190109210204.192109-12-bvanassche@acm.org>
Date: Wed, 9 Jan 2019 13:01:59 -0800
From: Bart Van Assche <bvanassche@acm.org>
To: peterz@infradead.org
Cc: mingo@redhat.com, tj@kernel.org, longman@redhat.com,
	johannes.berg@intel.com, linux-kernel@vger.kernel.org,
	Bart Van Assche <bvanassche@acm.org>,
	Johannes Berg <johannes@sipsolutions.net>
Subject: [PATCH v6 11/16] locking/lockdep: Reuse lock chains that have been freed

A previous patch in this series introduced a lock chain leak: when a lock
class is zapped, every lock chain that refers to it is removed from the
chain hash, but the corresponding lock_chains[] entries were leaked since
it is not safe to reuse a chain before an RCU grace period has expired.
Fix that leak by tracking the lock chains that are in use in a bitmap, by
marking chains being freed in the pending_free bitmaps and by making those
chains available for reuse once a grace period has elapsed.

Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Waiman Long <longman@...hat.com>
Cc: Johannes Berg <johannes@...solutions.net>
Signed-off-by: Bart Van Assche <bvanassche@....org>
---
 kernel/locking/lockdep.c | 62 ++++++++++++++++++++++------------------
 1 file changed, 34 insertions(+), 28 deletions(-)

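Side note, not part of the patch proper: the sketch below is a minimal
userspace model of the allocation scheme this patch introduces, for readers
who want to see the allocate / defer-free / reclaim cycle in isolation. All
names in it (struct entry, pool_alloc(), pool_defer_free(), pool_reclaim())
are hypothetical stand-ins, and the direct pool_reclaim() call stands in
for the RCU grace period after which free_zapped_classes() runs.

#include <limits.h>
#include <stdio.h>

#define POOL_SIZE	64
#define BITS_PER_LONG	(sizeof(long) * CHAR_BIT)
#define BITMAP_LONGS	((POOL_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct entry { int payload; };

static struct entry pool[POOL_SIZE];
/* Counterpart of lock_chains_in_use. */
static unsigned long in_use[BITMAP_LONGS];
/* Counterpart of pf->lock_chains_being_freed. */
static unsigned long being_freed[BITMAP_LONGS];

/* Hand out the first entry whose in-use bit is clear (cf. alloc_lock_chain()). */
static struct entry *pool_alloc(void)
{
	int i;

	for (i = 0; i < POOL_SIZE; i++) {
		if (!(in_use[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))) {
			in_use[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
			return pool + i;
		}
	}
	return NULL;	/* pool exhausted */
}

/* Mark an entry as being freed; concurrent readers may still reference it. */
static void pool_defer_free(struct entry *e)
{
	int i = e - pool;

	being_freed[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
}

/*
 * Called once no reader can hold a reference anymore: clear the in-use
 * bits of every deferred entry so it can be handed out again. This models
 * the bitmap_andnot()/bitmap_clear() pair in free_zapped_classes().
 */
static void pool_reclaim(void)
{
	int i;

	for (i = 0; i < BITMAP_LONGS; i++) {
		in_use[i] &= ~being_freed[i];
		being_freed[i] = 0;
	}
}

int main(void)
{
	struct entry *e = pool_alloc();

	if (!e)
		return 1;
	pool_defer_free(e);
	/* e still points at valid memory until the "grace period" ends. */
	pool_reclaim();
	printf("slot %td is available for reuse again\n", e - pool);
	return 0;
}

The kernel splits the same cycle across alloc_lock_chain()
(find_first_zero_bit() + __set_bit() under the graph lock), the
__set_bit() into pf->lock_chains_being_freed in
remove_class_from_lock_chain(), and the bitmap_andnot()/bitmap_clear()
pair in free_zapped_classes().
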
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index ef3809e21fa9..a8ea03bfc944 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -299,6 +299,7 @@ static struct pending_free {
 	struct rcu_head rcu_head;
 	bool scheduled;
 	DECLARE_BITMAP(list_entries_being_freed, MAX_LOCKDEP_ENTRIES);
+	DECLARE_BITMAP(lock_chains_being_freed, MAX_LOCKDEP_CHAINS);
 } pending_free[2];
 
 static DECLARE_WAIT_QUEUE_HEAD(rcu_cb);
@@ -2110,8 +2111,8 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 	return 0;
 }
 
-static unsigned long nr_lock_chains;
 struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
+static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS);
 int nr_chain_hlocks;
 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
 
@@ -2250,12 +2251,25 @@ static int check_no_collision(struct task_struct *curr,
  */
 long lockdep_next_lockchain(long i)
 {
-	return i + 1 < nr_lock_chains ? i + 1 : -2;
+	i = find_next_bit(lock_chains_in_use, ARRAY_SIZE(lock_chains), i + 1);
+	return i < ARRAY_SIZE(lock_chains) ? i : -2;
 }
 
 unsigned long lock_chain_count(void)
 {
-	return nr_lock_chains;
+	return bitmap_weight(lock_chains_in_use, ARRAY_SIZE(lock_chains));
+}
+
+/* Must be called with the graph lock held. */
+static struct lock_chain *alloc_lock_chain(void)
+{
+	int idx = find_first_zero_bit(lock_chains_in_use,
+				      ARRAY_SIZE(lock_chains));
+
+	if (unlikely(idx >= ARRAY_SIZE(lock_chains)))
+		return NULL;
+	__set_bit(idx, lock_chains_in_use);
+	return lock_chains + idx;
 }
 
 /*
@@ -2274,20 +2288,8 @@ static inline int add_chain_cache(struct task_struct *curr,
 	struct lock_chain *chain;
 	int i, j;
 
-	/*
-	 * Allocate a new chain entry from the static array, and add
-	 * it to the hash:
-	 */
-
-	/*
-	 * We might need to take the graph lock, ensure we've got IRQs
-	 * disabled to make this an IRQ-safe lock.. for recursion reasons
-	 * lockdep won't complain about its own locking errors.
-	 */
-	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-		return 0;
-
-	if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
+	chain = alloc_lock_chain();
+	if (!chain) {
 		if (!debug_locks_off_graph_unlock())
 			return 0;
 
@@ -2295,7 +2297,6 @@ static inline int add_chain_cache(struct task_struct *curr,
 		dump_stack();
 		return 0;
 	}
-	chain = lock_chains + nr_lock_chains++;
 	chain->chain_key = chain_key;
 	chain->irq_context = hlock->irq_context;
 	i = get_first_held_lock(curr, hlock);
@@ -4223,7 +4224,8 @@ void lockdep_reset(void)
 }
 
 /* Remove a class from a lock chain. Must be called with the graph lock held. */
-static void remove_class_from_lock_chain(struct lock_chain *chain,
+static void remove_class_from_lock_chain(struct pending_free *pf,
+					 struct lock_chain *chain,
 					 struct lock_class *class)
 {
 #ifdef CONFIG_PROVE_LOCKING
@@ -4261,6 +4263,7 @@ static void remove_class_from_lock_chain(struct lock_chain *chain,
 	 * hlist_for_each_entry_rcu() loop is safe.
 	 */
 	hlist_del_rcu(&chain->entry);
+	__set_bit(chain - lock_chains, pf->lock_chains_being_freed);
 	if (chain->depth == 0)
 		return;
 	/*
@@ -4269,22 +4272,19 @@ static void remove_class_from_lock_chain(struct lock_chain *chain,
 	 */
 	if (lookup_chain_cache(chain_key))
 		return;
-	if (WARN_ON_ONCE(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
+	new_chain = alloc_lock_chain();
+	if (WARN_ON_ONCE(!new_chain)) {
 		debug_locks_off();
 		return;
 	}
-	/*
-	 * Leak *chain because it is not safe to reinsert it before an RCU
-	 * grace period has expired.
-	 */
-	new_chain = lock_chains + nr_lock_chains++;
 	*new_chain = *chain;
 	hlist_add_head_rcu(&new_chain->entry, chainhashentry(chain_key));
 #endif
 }
 
 /* Must be called with the graph lock held. */
-static void remove_class_from_lock_chains(struct lock_class *class)
+static void remove_class_from_lock_chains(struct pending_free *pf,
+					  struct lock_class *class)
 {
 	struct lock_chain *chain;
 	struct hlist_head *head;
@@ -4293,7 +4293,7 @@ static void remove_class_from_lock_chains(struct lock_class *class)
 	for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
 		head = chainhash_table + i;
 		hlist_for_each_entry_rcu(chain, head, entry) {
-			remove_class_from_lock_chain(chain, class);
+			remove_class_from_lock_chain(pf, chain, class);
 		}
 	}
 }
@@ -4334,7 +4334,7 @@ static void zap_class(struct pending_free *pf, struct lock_class *class)
 			  class->name);
 	}
 
-	remove_class_from_lock_chains(class);
+	remove_class_from_lock_chains(pf, class);
 }
 
 static void reinit_class(struct lock_class *class)
@@ -4383,6 +4383,11 @@ static void free_zapped_classes(struct rcu_head *ch)
 	bitmap_andnot(list_entries_in_use, list_entries_in_use,
 		      pf->list_entries_being_freed, ARRAY_SIZE(list_entries));
 	bitmap_clear(pf->list_entries_being_freed, 0, ARRAY_SIZE(list_entries));
+#ifdef CONFIG_PROVE_LOCKING
+	bitmap_andnot(lock_chains_in_use, lock_chains_in_use,
+		      pf->lock_chains_being_freed, ARRAY_SIZE(lock_chains));
+	bitmap_clear(pf->lock_chains_being_freed, 0, ARRAY_SIZE(lock_chains));
+#endif
 	graph_unlock();
 restore_irqs:
 	raw_local_irq_restore(flags);
@@ -4675,6 +4680,7 @@ void __init lockdep_init(void)
 #ifdef CONFIG_PROVE_LOCKING
 		+ sizeof(lock_cq)
 		+ sizeof(lock_chains)
+		+ sizeof(lock_chains_in_use)
 		+ sizeof(chain_hlocks)
 #endif
 		) / 1024
--
2.20.1.97.g81188d93c3-goog