Message-Id: <20190829083132.22394-8-duyuyang@gmail.com>
Date: Thu, 29 Aug 2019 16:31:09 +0800
From: Yuyang Du <duyuyang@...il.com>
To: peterz@...radead.org, will.deacon@....com, mingo@...nel.org
Cc: bvanassche@....org, ming.lei@...hat.com, frederic@...nel.org,
tglx@...utronix.de, linux-kernel@...r.kernel.org,
longman@...hat.com, paulmck@...ux.vnet.ibm.com,
boqun.feng@...il.com, Yuyang Du <duyuyang@...il.com>
Subject: [PATCH v4 07/30] locking/lockdep: Remove indirect dependency redundancy check

The indirect dependency redundancy check was added for cross-release, which
has since been reverted. Then, as Peter suggested, the check was kept but
made to take effect only when CONFIG_LOCKDEP_SMALL is set.

With (recursive) read-write lock types considered in the dependency graph,
the indirect dependency redundancy check would be quite complicated to
implement. Let's remove it for good. This inevitably increases the number
of recorded dependencies, but after forward and backward dependencies are
combined, the increase will be offset.
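
As an aside for review (a minimal sketch, not code from this patch; the
locks and functions below are made up): f1() and f2() make lockdep record
the direct dependencies a -> b and b -> c. The a -> c link taken in f3()
is what check_redundant() classified as redundant under
CONFIG_LOCKDEP_SMALL, since c is already reachable from a through b; after
this patch that direct link is always recorded.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(a);
static DEFINE_SPINLOCK(b);
static DEFINE_SPINLOCK(c);

static void f1(void)
{
	spin_lock(&a);
	spin_lock(&b);		/* lockdep records direct dependency a -> b */
	spin_unlock(&b);
	spin_unlock(&a);
}

static void f2(void)
{
	spin_lock(&b);
	spin_lock(&c);		/* lockdep records direct dependency b -> c */
	spin_unlock(&c);
	spin_unlock(&b);
}

static void f3(void)
{
	spin_lock(&a);
	spin_lock(&c);		/* a -> c: redundant, a already reaches c via b */
	spin_unlock(&c);
	spin_unlock(&a);
}
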
Signed-off-by: Yuyang Du <duyuyang@...il.com>
---
 kernel/locking/lockdep.c           | 41 --------------------------------------
 kernel/locking/lockdep_internals.h |  1 -
 kernel/locking/lockdep_proc.c      |  2 --
 3 files changed, 44 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index a0e62e5..4838c99 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1812,38 +1812,6 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
 	return ret;
 }
 
-#ifdef CONFIG_LOCKDEP_SMALL
-/*
- * Check that the dependency graph starting at <src> can lead to
- * <target> or not. If it can, <src> -> <target> dependency is already
- * in the graph.
- *
- * Print an error and return 2 if it does or 1 if it does not.
- */
-static noinline int
-check_redundant(struct held_lock *src, struct held_lock *target)
-{
-	int ret;
-	struct lock_list *uninitialized_var(target_entry);
-	struct lock_list src_entry = {
-		.class = hlock_class(src),
-		.parent = NULL,
-	};
-
-	debug_atomic_inc(nr_redundant_checks);
-
-	ret = check_path(hlock_class(target), &src_entry, &target_entry);
-
-	if (!ret) {
-		debug_atomic_inc(nr_redundant);
-		ret = 2;
-	} else if (ret < 0)
-		ret = 0;
-
-	return ret;
-}
-#endif
-
 #ifdef CONFIG_TRACE_IRQFLAGS
 
 static inline int usage_accumulate(struct lock_list *entry, void *mask)
@@ -2507,15 +2475,6 @@ static inline void inc_chains(void)
 		}
 	}
 
-#ifdef CONFIG_LOCKDEP_SMALL
-	/*
-	 * Is the <prev> -> <next> link redundant?
-	 */
-	ret = check_redundant(prev, next);
-	if (ret != 1)
-		return ret;
-#endif
-
 	if (!*trace) {
 		*trace = save_trace();
 		if (!*trace)
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index 18d85ae..f499426 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -177,7 +177,6 @@ struct lockdep_stats {
 	unsigned long  redundant_softirqs_on;
 	unsigned long  redundant_softirqs_off;
 	int            nr_unused_locks;
-	unsigned int   nr_redundant_checks;
 	unsigned int   nr_redundant;
 	unsigned int   nr_cyclic_checks;
 	unsigned int   nr_find_usage_forwards_checks;
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index dadb7b7..edc4a7b 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -178,8 +178,6 @@ static void lockdep_stats_debug_show(struct seq_file *m)
 			debug_atomic_read(chain_lookup_hits));
 	seq_printf(m, " cyclic checks:                 %11llu\n",
 			debug_atomic_read(nr_cyclic_checks));
-	seq_printf(m, " redundant checks:              %11llu\n",
-			debug_atomic_read(nr_redundant_checks));
 	seq_printf(m, " redundant links:               %11llu\n",
 			debug_atomic_read(nr_redundant));
 	seq_printf(m, " find-mask forwards checks:     %11llu\n",
--
1.8.3.1