Message-ID: <159851485926.20229.14104571007024271963.tip-bot2@tip-bot2>
Date: Thu, 27 Aug 2020 07:54:19 -0000
From: "tip-bot2 for Boqun Feng" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Peter Zijlstra <peterz@...radead.org>,
Boqun Feng <boqun.feng@...il.com>, x86 <x86@...nel.org>,
LKML <linux-kernel@...r.kernel.org>
Subject: [tip: locking/core] lockdep: Make __bfs(.match) return bool

The following commit has been merged into the locking/core branch of tip:

Commit-ID:     61775ed243433ff0556c4f76905929fe01e92922
Gitweb:        https://git.kernel.org/tip/61775ed243433ff0556c4f76905929fe01e92922
Author:        Boqun Feng <boqun.feng@...il.com>
AuthorDate:    Fri, 07 Aug 2020 15:42:27 +08:00
Committer:     Peter Zijlstra <peterz@...radead.org>
CommitterDate: Wed, 26 Aug 2020 12:42:05 +02:00

lockdep: Make __bfs(.match) return bool

The "match" parameter of __bfs() is used for checking whether we hit a
match in the search, therefore it should return a boolean value rather
than an integer for better readability.
This patch then changes the return type of the function parameter and the
match functions to bool.
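To illustrate the pattern in question, here is a minimal stand-alone
sketch of a search routine that takes a bool-returning match callback.
The struct node, search() and id_equal() names below are hypothetical
stand-ins for the kernel's lock_list, __bfs() and class_equal(); this
is not kernel code:

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-in for the kernel's struct lock_list. */
struct node {
        int id;
        struct node *next;      /* next entry to visit */
};

/*
 * Visit each entry in turn and return the first one for which the
 * bool-returning match() callback reports a hit, or NULL if none does.
 */
static struct node *search(struct node *start, void *data,
                           bool (*match)(struct node *entry, void *data))
{
        struct node *entry;

        for (entry = start; entry; entry = entry->next) {
                if (match(entry, data))
                        return entry;
        }
        return NULL;
}

/* Match callback: hit when the entry's id equals *(int *)data. */
static bool id_equal(struct node *entry, void *data)
{
        return entry->id == *(int *)data;
}

int main(void)
{
        struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        int wanted = 2;

        return search(&a, &wanted, id_equal) ? 0 : 1;
}

A call such as search(&a, &wanted, id_equal) then reads as a plain
predicate test at each visited entry, which is the readability point
made above.
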
Suggested-by: Peter Zijlstra <peterz@...radead.org>
Signed-off-by: Boqun Feng <boqun.feng@...il.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Link: https://lkml.kernel.org/r/20200807074238.1632519-9-boqun.feng@gmail.com
---
 kernel/locking/lockdep.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 5abc227..78cd74d 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1620,7 +1620,7 @@ static inline void bfs_init_rootb(struct lock_list *lock,
  */
 static enum bfs_result __bfs(struct lock_list *source_entry,
                              void *data,
-                             int (*match)(struct lock_list *entry, void *data),
+                             bool (*match)(struct lock_list *entry, void *data),
                              struct lock_list **target_entry,
                              int offset)
 {
@@ -1711,7 +1711,7 @@ exit:
 static inline enum bfs_result
 __bfs_forwards(struct lock_list *src_entry,
                void *data,
-               int (*match)(struct lock_list *entry, void *data),
+               bool (*match)(struct lock_list *entry, void *data),
                struct lock_list **target_entry)
 {
         return __bfs(src_entry, data, match, target_entry,
@@ -1722,7 +1722,7 @@ __bfs_forwards(struct lock_list *src_entry,
 static inline enum bfs_result
 __bfs_backwards(struct lock_list *src_entry,
                 void *data,
-                int (*match)(struct lock_list *entry, void *data),
+                bool (*match)(struct lock_list *entry, void *data),
                 struct lock_list **target_entry)
 {
         return __bfs(src_entry, data, match, target_entry,
@@ -1833,7 +1833,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth,
         print_circular_bug_entry(entry, depth);
 }

-static inline int class_equal(struct lock_list *entry, void *data)
+static inline bool class_equal(struct lock_list *entry, void *data)
 {
         return entry->class == data;
 }
@@ -1888,10 +1888,10 @@ static noinline void print_bfs_bug(int ret)
         WARN(1, "lockdep bfs error:%d\n", ret);
 }

-static int noop_count(struct lock_list *entry, void *data)
+static bool noop_count(struct lock_list *entry, void *data)
 {
         (*(unsigned long *)data)++;
-        return 0;
+        return false;
 }

 static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
@@ -2032,11 +2032,11 @@ check_redundant(struct held_lock *src, struct held_lock *target)

 #ifdef CONFIG_TRACE_IRQFLAGS

-static inline int usage_accumulate(struct lock_list *entry, void *mask)
+static inline bool usage_accumulate(struct lock_list *entry, void *mask)
 {
         *(unsigned long *)mask |= entry->class->usage_mask;

-        return 0;
+        return false;
 }

 /*
@@ -2045,9 +2045,9 @@ static inline int usage_accumulate(struct lock_list *entry, void *mask)
  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
  */

-static inline int usage_match(struct lock_list *entry, void *mask)
+static inline bool usage_match(struct lock_list *entry, void *mask)
 {
-        return entry->class->usage_mask & *(unsigned long *)mask;
+        return !!(entry->class->usage_mask & *(unsigned long *)mask);
 }

 /*
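
As a closing illustration of the two callback styles touched by this
patch, the stand-alone sketch below (hypothetical names, not kernel
code) contrasts an accumulating callback that always returns false so
the walk visits every entry, as noop_count() and usage_accumulate() do,
with a matching callback that normalizes a bitmask test to true/false
using !!, as usage_match() now does:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct entry {
        unsigned long usage_mask;
};

/* Accumulate: OR every entry's mask into *data and never stop the walk. */
static bool accumulate_mask(struct entry *e, void *data)
{
        *(unsigned long *)data |= e->usage_mask;
        return false;   /* keep walking, like noop_count()/usage_accumulate() */
}

/* Match: report a hit when the entry carries any of the requested bits. */
static bool mask_match(struct entry *e, void *data)
{
        /* !! collapses the bitwise-AND result to a clean true/false. */
        return !!(e->usage_mask & *(unsigned long *)data);
}

int main(void)
{
        struct entry graph[] = { { 0x1UL }, { 0x4UL }, { 0x10UL } };
        unsigned long seen = 0, want = 0x4UL;
        size_t i;

        for (i = 0; i < sizeof(graph) / sizeof(graph[0]); i++) {
                accumulate_mask(&graph[i], &seen);
                if (mask_match(&graph[i], &want))
                        printf("entry %zu matches mask 0x%lx\n", i, want);
        }
        printf("accumulated mask: 0x%lx\n", seen);
        return 0;
}

The !! is not strictly required once the return type is bool, since
conversion to bool already yields true for any non-zero value, but it
makes the intent explicit at the call site.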