Message-Id: <20190212171423.8308-4-frederic@kernel.org>
Date: Tue, 12 Feb 2019 18:13:54 +0100
From: Frederic Weisbecker <frederic@...nel.org>
To: LKML <linux-kernel@...r.kernel.org>
Cc: Frederic Weisbecker <frederic@...nel.org>,
Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
Peter Zijlstra <peterz@...radead.org>,
Mauro Carvalho Chehab <mchehab@...pensource.com>,
Linus Torvalds <torvalds@...ux-foundation.org>,
"David S . Miller" <davem@...emloft.net>,
Thomas Gleixner <tglx@...utronix.de>,
"Paul E . McKenney" <paulmck@...ux.vnet.ibm.com>,
Frederic Weisbecker <fweisbec@...il.com>,
Pavan Kondeti <pkondeti@...eaurora.org>,
Ingo Molnar <mingo@...nel.org>,
Joel Fernandes <joel@...lfernandes.org>
Subject: [PATCH 03/32] locking/lockdep: Convert usage_mask to u64
The usage mask is going to expand to validate softirq-related usages in
a per-vector, fine-grained way.

The current bitmap layout is:

  LOCK_USED       HARDIRQ bits
          \            /
           \          /
            0  0000 0000
                 |
                 |
              SOFTIRQ bits

The new one will be:

                      TIMER_SOFTIRQ
   LOCK_USED              bits   HARDIRQ bits
           \                |         |
            \               |         |
             0  0000 [...] 0000 0000 0000
                  |               |
                  |               |
            RCU_SOFTIRQ     HI_SOFTIRQ bits
                bits

So we have 4 hardirq bits + NR_SOFTIRQS * 4 softirq bits + 1 bit
(LOCK_USED) = 45 bits. Therefore we need a 64-bit mask.
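
Not part of the patch, just a standalone back-of-the-envelope sketch of
that bit budget, assuming 4 usage bits per IRQ context and the 10
softirq vectors of current kernels (the little program below is
illustrative only, not lockdep code):

/* Illustrative only: recompute the 45-bit budget quoted above. */
#include <stdio.h>

int main(void)
{
	int hardirq_bits = 4;	/* used-in, used-in-read, enabled, enabled-read */
	int nr_softirqs = 10;	/* HI, TIMER, NET_TX, NET_RX, BLOCK,
				 * IRQ_POLL, TASKLET, SCHED, HRTIMER, RCU */
	int softirq_bits = nr_softirqs * 4;	/* 4 usage bits per vector */
	int lock_used = 1;

	printf("%d bits needed\n", hardirq_bits + softirq_bits + lock_used);
	return 0;
}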
Signed-off-by: Frederic Weisbecker <frederic@...nel.org>
Cc: Mauro Carvalho Chehab <mchehab@...pensource.com>
Cc: Joel Fernandes <joel@...lfernandes.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Pavan Kondeti <pkondeti@...eaurora.org>
Cc: Paul E . McKenney <paulmck@...ux.vnet.ibm.com>
Cc: David S . Miller <davem@...emloft.net>
Cc: Ingo Molnar <mingo@...nel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Peter Zijlstra <peterz@...radead.org>
---
include/linux/lockdep.h | 2 +-
kernel/locking/lockdep.c | 8 ++++----
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index c5335df2372f..06669f20a30a 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -83,7 +83,7 @@ struct lock_class {
/*
* IRQ/softirq usage tracking bits:
*/
- unsigned long usage_mask;
+ u64 usage_mask;
struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES];
/*
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 1bb955d22eae..a977aa5976b7 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -463,9 +463,9 @@ const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
}
-static inline unsigned long lock_flag(enum lock_usage_bit bit)
+static inline u64 lock_flag(enum lock_usage_bit bit)
{
- return 1UL << bit;
+ return BIT_ULL(bit);
}
static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
@@ -1400,7 +1400,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
printk(KERN_CONT " {\n");
for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
- if (class->usage_mask & (1 << bit)) {
+ if (class->usage_mask & lock_flag(bit)) {
int len = depth;
len += printk("%*s %s", depth, "", usage_str[bit]);
@@ -2478,7 +2478,7 @@ static inline int
valid_state(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
{
- if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
+ if (unlikely(hlock_class(this)->usage_mask & lock_flag(bad_bit)))
return print_usage_bug(curr, this, bad_bit, new_bit);
return 1;
}
--
2.17.1
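
Side note, not from the patch itself: a minimal userspace sketch of why
lock_flag() moves to BIT_ULL(), which boils down to a 1ULL shift.  Once
usage bits go past 31, shifting a 32-bit unsigned long (as on 32-bit
builds) is no longer well defined, while a 64-bit shift is:

/*
 * Userspace illustration only.  The bit value 40 is just an example of
 * a usage bit that would land in one of the upper softirq vector groups
 * under the layout described in the changelog.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t lock_flag(int bit)
{
	return 1ULL << bit;	/* same idea as BIT_ULL(bit) */
}

int main(void)
{
	int bit = 40;

	printf("bit %d -> mask 0x%" PRIx64 "\n", bit, lock_flag(bit));
	return 0;
}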