Message-Id: <20181128234325.110011-23-bvanassche@acm.org>
Date:   Wed, 28 Nov 2018 15:43:20 -0800
From:   Bart Van Assche <bvanassche@....org>
To:     mingo@...hat.com
Cc:     peterz@...radead.org, tj@...nel.org, johannes.berg@...el.com,
        linux-kernel@...r.kernel.org, Bart Van Assche <bvanassche@....org>
Subject: [PATCH 22/27] locking/lockdep: Reuse list entries that are no longer in use

Instead of abandoning elements of list_entries[] that are no longer in
use, make alloc_list_entry() reuse array elements that have been freed.
To that end, put all list entries on a free list during initialization,
move an entry to the in-use list when it is allocated, and move it back
to the free list when zap_class() removes its dependency.

Signed-off-by: Bart Van Assche <bvanassche@....org>
---
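For illustration only (not part of this patch): a minimal, self-contained
userspace sketch of the same free-list reuse pattern. The names below
(struct entry, pool_alloc(), pool_free(), POOL_SIZE) are made up for this
example; the kernel code uses struct lock_list, alloc_list_entry() and the
<linux/list.h> helpers instead.

#include <stddef.h>
#include <stdio.h>

struct node { struct node *prev, *next; };

static void list_init(struct node *h) { h->prev = h->next = h; }

static void list_del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void list_add_tail(struct node *n, struct node *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

struct entry {
	struct node alloc_entry;	/* on free_list or in_use_list */
	int payload;
};

#define POOL_SIZE 2
static struct entry entry_pool[POOL_SIZE];
static struct node free_list, in_use_list;

static void pool_init(void)
{
	int i;

	list_init(&free_list);
	list_init(&in_use_list);
	/* Start with every pool element on the free list. */
	for (i = 0; i < POOL_SIZE; i++)
		list_add_tail(&entry_pool[i].alloc_entry, &free_list);
}

/* Take the first free entry and move it to the in-use list. */
static struct entry *pool_alloc(void)
{
	struct node *n;

	if (free_list.next == &free_list)
		return NULL;		/* pool exhausted */
	n = free_list.next;
	list_del(n);
	list_add_tail(n, &in_use_list);
	return (struct entry *)((char *)n - offsetof(struct entry, alloc_entry));
}

/* Put an entry back on the free list so it can be handed out again. */
static void pool_free(struct entry *e)
{
	list_del(&e->alloc_entry);
	list_add_tail(&e->alloc_entry, &free_list);
}

int main(void)
{
	struct entry *a, *b;

	pool_init();
	a = pool_alloc();
	b = pool_alloc();
	pool_free(a);
	/* The freed slot is handed out again instead of being abandoned. */
	printf("reused: %d\n", pool_alloc() == a);
	pool_free(b);
	return 0;
}

The point is that a freed slot goes back onto the free list and is reused
by a later allocation instead of being lost for the lifetime of the system.
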
 include/linux/lockdep.h  |  5 +++++
 kernel/locking/lockdep.c | 23 ++++++++++++++++-------
 2 files changed, 21 insertions(+), 7 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 43327a1dd488..01e55fca7c2c 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -183,6 +183,11 @@ static inline void lockdep_copy_map(struct lockdep_map *to,
 struct lock_list {
 	/* Entry in locks_after or locks_before. */
 	struct list_head		lock_order_entry;
+	/*
+	 * Entry in all_list_entries when in use and entry in
+	 * free_list_entries when not in use.
+	 */
+	struct list_head		alloc_entry;
 	struct lock_class		*class;
 	struct lock_class		*links_to;
 	struct stack_trace		trace;
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 038377d67410..288a2f6fd0ef 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -130,6 +130,8 @@ static inline int debug_locks_off_graph_unlock(void)
 
 unsigned long nr_list_entries;
 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
+static LIST_HEAD(all_list_entries);
+static LIST_HEAD(free_list_entries);
 
 /*
  * All data structures here are protected by the global debug_lock.
@@ -743,6 +745,9 @@ static void init_lists(void)
 		INIT_LIST_HEAD(&lock_classes[i].locks_after);
 		INIT_LIST_HEAD(&lock_classes[i].locks_before);
 	}
+
+	for (i = 0; i < ARRAY_SIZE(list_entries); i++)
+		list_add_tail(&list_entries[i].alloc_entry, &free_list_entries);
 }
 
 /*
@@ -862,7 +867,10 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
  */
 static struct lock_list *alloc_list_entry(void)
 {
-	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
+	struct lock_list *e = list_first_entry_or_null(&free_list_entries,
+						       typeof(*e), alloc_entry);
+
+	if (!e) {
 		if (!debug_locks_off_graph_unlock())
 			return NULL;
 
@@ -870,7 +878,8 @@ static struct lock_list *alloc_list_entry(void)
 		dump_stack();
 		return NULL;
 	}
-	return list_entries + nr_list_entries++;
+	list_move_tail(&e->alloc_entry, &all_list_entries);
+	return e;
 }
 
 /*
@@ -975,7 +984,7 @@ static inline void mark_lock_accessed(struct lock_list *lock,
 	unsigned long nr;
 
 	nr = lock - list_entries;
-	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
+	WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
 	lock->parent = parent;
 	lock->class->dep_gen_id = lockdep_dependency_gen_id;
 }
@@ -985,7 +994,7 @@ static inline unsigned long lock_accessed(struct lock_list *lock)
 	unsigned long nr;
 
 	nr = lock - list_entries;
-	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
+	WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
 	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
 }
 
@@ -4235,19 +4244,19 @@ static void zap_class(struct list_head *zapped_classes,
 		      struct lock_class *class)
 {
 	struct lock_class *links_to;
-	struct lock_list *entry;
-	int i;
+	struct lock_list *entry, *tmp;
 
 	/*
 	 * Remove all dependencies this lock is
 	 * involved in:
 	 */
-	for (i = 0, entry = list_entries; i < nr_list_entries; i++, entry++) {
+	list_for_each_entry_safe(entry, tmp, &all_list_entries, alloc_entry) {
 		if (entry->class != class && entry->links_to != class)
 			continue;
 		links_to = entry->links_to;
 		WARN_ON_ONCE(entry->class == links_to);
 		list_del_rcu(&entry->lock_order_entry);
+		list_move(&entry->alloc_entry, &free_list_entries);
 		entry->class = NULL;
 		entry->links_to = NULL;
 		check_free_class(zapped_classes, class);
-- 
2.20.0.rc0.387.gc7a69e6b6c-goog
