Message-Id: <20191216151517.7060-6-longman@redhat.com>
Date: Mon, 16 Dec 2019 10:15:16 -0500
From: Waiman Long <longman@...hat.com>
To: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Will Deacon <will.deacon@....com>
Cc: linux-kernel@...r.kernel.org, Bart Van Assche <bvanassche@....org>,
Waiman Long <longman@...hat.com>
Subject: [PATCH v2 5/6] locking/lockdep: Decrement irq context counters when removing lock chain

There are currently three counters that track the irq context of a
lock chain: nr_hardirq_chains, nr_softirq_chains and nr_process_chains.
They are incremented when a new lock chain is added, but they are not
decremented when a lock chain is removed. That causes some of the
statistics reported by /proc/lockdep_stats to be incorrect.

Fix that by decrementing the right counter when a lock chain is
removed.

Fixes: a0b0fd53e1e6 ("locking/lockdep: Free lock classes that are no longer in use")
Signed-off-by: Waiman Long <longman@...hat.com>
---
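For reviewers, a stand-alone user-space sketch of the counter pairing this
patch introduces. It is not part of the patch: the globals and the main()
harness are illustrative only, while the LOCK_CHAIN_* bits and the counter
selection mirror the kernel hunks below. The point is that a chain can be
removed from a different context than the one it was added in, so both
helpers key off the context recorded in the chain itself rather than
"current":

#include <stdio.h>

/* Bit definitions mirroring lock_chain.irq_context in the patch. */
#define LOCK_CHAIN_SOFTIRQ_CONTEXT	(1 << 0)
#define LOCK_CHAIN_HARDIRQ_CONTEXT	(1 << 1)

/* Stand-ins for the lockdep statistics counters. */
static unsigned long nr_hardirq_chains, nr_softirq_chains, nr_process_chains;

/* Pick the counter from the chain's recorded context, not "current". */
static void inc_chains(int irq_context)
{
	if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
		nr_hardirq_chains++;
	else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
		nr_softirq_chains++;
	else
		nr_process_chains++;
}

/* Exact mirror of inc_chains(), so add and remove always balance. */
static void dec_chains(int irq_context)
{
	if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
		nr_hardirq_chains--;
	else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
		nr_softirq_chains--;
	else
		nr_process_chains--;
}

int main(void)
{
	/* A chain added in hardirq context and later removed again. */
	inc_chains(LOCK_CHAIN_HARDIRQ_CONTEXT);	/* as in add_chain_cache() */
	dec_chains(LOCK_CHAIN_HARDIRQ_CONTEXT);	/* as in remove_class_from_lock_chain() */

	/* All three counters are back to 0, as /proc/lockdep_stats expects. */
	printf("hardirq=%lu softirq=%lu process=%lu\n",
	       nr_hardirq_chains, nr_softirq_chains, nr_process_chains);
	return 0;
}
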
kernel/locking/lockdep.c | 35 +++++++++++++++++++++---------
kernel/locking/lockdep_internals.h | 6 +++++
2 files changed, 31 insertions(+), 10 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index f7dcd4e77a70..4ea23bb6f8d3 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2300,16 +2300,24 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
return 0;
}
 
-static void inc_chains(void)
+static void inc_chains(int irq_context)
{
- if (current->hardirq_context)
+ if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
nr_hardirq_chains++;
- else {
- if (current->softirq_context)
- nr_softirq_chains++;
- else
- nr_process_chains++;
- }
+ else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
+ nr_softirq_chains++;
+ else
+ nr_process_chains++;
+}
+
+static void dec_chains(int irq_context)
+{
+ if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
+ nr_hardirq_chains--;
+ else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
+ nr_softirq_chains--;
+ else
+ nr_process_chains--;
}
 
#else
@@ -2325,6 +2333,10 @@ static inline void inc_chains(void)
nr_process_chains++;
}
 
+static inline void dec_chains(int irq_context)
+{
+ nr_process_chains--;
+}
#endif /* CONFIG_TRACE_IRQFLAGS */
static void
@@ -3037,7 +3049,7 @@ static inline int add_chain_cache(struct task_struct *curr,
chain_hlocks[chain->base + j] = class - lock_classes;
hlist_add_head_rcu(&chain->entry, hash_head);
debug_atomic_inc(chain_lookup_misses);
- inc_chains();
+ inc_chains(chain->irq_context);
 
return 1;
}
@@ -3790,7 +3802,8 @@ mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
static inline unsigned int task_irq_context(struct task_struct *task)
{
- return 2 * !!task->hardirq_context + !!task->softirq_context;
+ return LOCK_CHAIN_HARDIRQ_CONTEXT * !!task->hardirq_context +
+ LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context;
}
 
static int separate_irq_context(struct task_struct *curr,
@@ -4980,6 +4993,8 @@ static void remove_class_from_lock_chain(struct pending_free *pf,
free_chain_hlocks(chain->base, chain->depth);
/* Overwrite the chain key for concurrent RCU readers. */
WRITE_ONCE(chain->chain_key, INITIAL_CHAIN_KEY);
+ dec_chains(chain->irq_context);
+
/*
* Note: calling hlist_del_rcu() from inside a
* hlist_for_each_entry_rcu() loop is safe.
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index 91bbc5684103..9425155b9053 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -98,6 +98,12 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ =
#define MAX_LOCKDEP_CHAINS_BITS 16
 
+/*
+ * Bit definitions for lock_chain.irq_context
+ */
+#define LOCK_CHAIN_SOFTIRQ_CONTEXT (1 << 0)
+#define LOCK_CHAIN_HARDIRQ_CONTEXT (1 << 1)
+
/*
* Stack-trace: tightly packed array of stack backtrace
* addresses. Protected by the hash_lock.
--
2.18.1