Message-Id: <20180117151414.23686-3-willy@infradead.org>
Date: Wed, 17 Jan 2018 07:14:13 -0800
From: Matthew Wilcox <willy@...radead.org>
To: Peter Zijlstra <peterz@...radead.org>,
	Ingo Molnar <mingo@...hat.com>
Cc: "David S. Miller" <davem@...emloft.net>,
	Matthew Wilcox <mawilcox@...rosoft.com>,
	Thomas Gleixner <tglx@...utronix.de>,
	linux-kernel@...r.kernel.org
Subject: [PATCH 2/3] lockdep: Make lockdep checking constant

From: Matthew Wilcox <mawilcox@...rosoft.com>

There are several places in the kernel which would like to pass a const
pointer to lockdep_is_held(). Constify the entire path so nobody has to
trick the compiler.
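
For illustration, a minimal sketch of the kind of cast this change makes
unnecessary (the structure and accessor below are made up for the example,
not taken from any in-tree user):

	/* assumes <linux/lockdep.h> and <linux/bug.h> */
	struct foo {
		struct lockdep_map	dep_map;
		int			value;
	};

	/* A read-only accessor currently has to cast away const before it
	 * can assert that its lock is held: */
	static int foo_peek(const struct foo *f)
	{
		WARN_ON(!lock_is_held((struct lockdep_map *)&f->dep_map));
		return f->value;
	}

	/* With lock_is_held() taking a const pointer, the cast can go:
	 *	WARN_ON(!lock_is_held(&f->dep_map));
	 */
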
Signed-off-by: Matthew Wilcox <mawilcox@...rosoft.com>
---
include/linux/lockdep.h | 4 ++--
kernel/locking/lockdep.c | 13 +++++++------
2 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 3251d9c0d313..864d6fc60fa6 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -337,9 +337,9 @@ extern void lock_release(struct lockdep_map *lock, int nested,
 /*
  * Same "read" as for lock_acquire(), except -1 means any.
  */
-extern int lock_is_held_type(struct lockdep_map *lock, int read);
+extern int lock_is_held_type(const struct lockdep_map *lock, int read);
 
-static inline int lock_is_held(struct lockdep_map *lock)
+static inline int lock_is_held(const struct lockdep_map *lock)
 {
 	return lock_is_held_type(lock, -1);
 }
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 472547dd45c3..b7a307b53704 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -648,7 +648,7 @@ static int count_matching_names(struct lock_class *new_class)
 }
 
 static inline struct lock_class *
-look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
+look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
 {
 	struct lockdep_subclass_key *key;
 	struct hlist_head *hash_head;
@@ -3276,7 +3276,7 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
 	return 0;
 }
 
-static int __lock_is_held(struct lockdep_map *lock, int read);
+static int __lock_is_held(const struct lockdep_map *lock, int read);
 
 /*
  * This gets called for every mutex_lock*()/spin_lock*() operation.
@@ -3485,13 +3485,14 @@ print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
 	return 0;
 }
 
-static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
+static int match_held_lock(const struct held_lock *hlock,
+			   const struct lockdep_map *lock)
 {
 	if (hlock->instance == lock)
 		return 1;
 
 	if (hlock->references) {
-		struct lock_class *class = lock->class_cache[0];
+		const struct lock_class *class = lock->class_cache[0];
 
 		if (!class)
 			class = look_up_lock_class(lock, 0);
@@ -3727,7 +3728,7 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 	return 1;
 }
 
-static int __lock_is_held(struct lockdep_map *lock, int read)
+static int __lock_is_held(const struct lockdep_map *lock, int read)
 {
 	struct task_struct *curr = current;
 	int i;
@@ -3941,7 +3942,7 @@ void lock_release(struct lockdep_map *lock, int nested,
 }
 EXPORT_SYMBOL_GPL(lock_release);
 
-int lock_is_held_type(struct lockdep_map *lock, int read)
+int lock_is_held_type(const struct lockdep_map *lock, int read)
 {
 	unsigned long flags;
 	int ret = 0;
--
2.15.1