Message-ID: <20250918140451.1289454-14-elver@google.com>
Date: Thu, 18 Sep 2025 15:59:24 +0200
From: Marco Elver <elver@...gle.com>
To: elver@...gle.com, Peter Zijlstra <peterz@...radead.org>,
Boqun Feng <boqun.feng@...il.com>, Ingo Molnar <mingo@...nel.org>, Will Deacon <will@...nel.org>
Cc: "David S. Miller" <davem@...emloft.net>, Luc Van Oostenryck <luc.vanoostenryck@...il.com>,
"Paul E. McKenney" <paulmck@...nel.org>, Alexander Potapenko <glider@...gle.com>, Arnd Bergmann <arnd@...db.de>,
Bart Van Assche <bvanassche@....org>, Bill Wendling <morbo@...gle.com>, Christoph Hellwig <hch@....de>,
Dmitry Vyukov <dvyukov@...gle.com>, Eric Dumazet <edumazet@...gle.com>,
Frederic Weisbecker <frederic@...nel.org>, Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
Herbert Xu <herbert@...dor.apana.org.au>, Ian Rogers <irogers@...gle.com>,
Jann Horn <jannh@...gle.com>, Joel Fernandes <joelagnelf@...dia.com>,
Jonathan Corbet <corbet@....net>, Josh Triplett <josh@...htriplett.org>,
Justin Stitt <justinstitt@...gle.com>, Kees Cook <kees@...nel.org>,
Kentaro Takeda <takedakn@...data.co.jp>, Lukas Bulwahn <lukas.bulwahn@...il.com>,
Mark Rutland <mark.rutland@....com>, Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Miguel Ojeda <ojeda@...nel.org>, Nathan Chancellor <nathan@...nel.org>,
Neeraj Upadhyay <neeraj.upadhyay@...nel.org>,
Nick Desaulniers <nick.desaulniers+lkml@...il.com>, Steven Rostedt <rostedt@...dmis.org>,
Tetsuo Handa <penguin-kernel@...ove.SAKURA.ne.jp>, Thomas Gleixner <tglx@...utronix.de>,
Thomas Graf <tgraf@...g.ch>, Uladzislau Rezki <urezki@...il.com>, Waiman Long <longman@...hat.com>,
kasan-dev@...glegroups.com, linux-crypto@...r.kernel.org,
linux-doc@...r.kernel.org, linux-kbuild@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-mm@...ck.org,
linux-security-module@...r.kernel.org, linux-sparse@...r.kernel.org,
llvm@...ts.linux.dev, rcu@...r.kernel.org
Subject: [PATCH v3 13/35] bit_spinlock: Support Clang's capability analysis

The annotations for bit_spinlock.h have so far simply used "bitlock" as
the token. For Sparse, that was likely sufficient in most cases. But
Clang's capability analysis is more precise, and we need to be able to
distinguish different bitlocks.

To do so, add a token capability, and a macro __bitlock(bitnum, addr)
that constructs unique per-bitlock tokens.
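
As a minimal sketch (illustrative only, using a hypothetical local word
of lock bits; not part of the diff below), distinct (bit, addr) pairs
now map to distinct capability instances, so the analysis can tell
nested bitlocks apart:

    unsigned long word = 0;

    bit_spin_lock(1, &word);    /* acquires __bitlock(1, &word) */
    bit_spin_lock(2, &word);    /* acquires __bitlock(2, &word), a distinct token */
    bit_spin_unlock(2, &word);  /* releases __bitlock(2, &word) */
    bit_spin_unlock(1, &word);  /* releases __bitlock(1, &word) */

With a single shared "bitlock" token, the analysis could not
distinguish these two locks at all.
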
Add the appropriate test.

<linux/list_bl.h> is implicitly included through other includes, and
requires two annotations to indicate that acquisition (without release)
and release (without prior acquisition) of its bitlock are intended.
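
For illustration, a sketch with a hypothetical helper (assuming
<linux/list_bl.h> is included): a typical caller pairs the two, so the
lock set balances at the call site even though each function on its own
only acquires or only releases the bitlock:

    static void frob_bl_list(struct hlist_bl_head *head)
    {
        hlist_bl_lock(head);    /* acquires __bitlock(0, head) */
        /* ... modify the list under the bit lock ... */
        hlist_bl_unlock(head);  /* releases __bitlock(0, head) */
    }
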
Signed-off-by: Marco Elver <elver@...gle.com>
---
.../dev-tools/capability-analysis.rst | 3 ++-
include/linux/bit_spinlock.h | 22 +++++++++++++---
include/linux/list_bl.h | 2 ++
lib/test_capability-analysis.c | 26 +++++++++++++++++++
4 files changed, 48 insertions(+), 5 deletions(-)
diff --git a/Documentation/dev-tools/capability-analysis.rst b/Documentation/dev-tools/capability-analysis.rst
index 4789de7b019a..56c6ba7205aa 100644
--- a/Documentation/dev-tools/capability-analysis.rst
+++ b/Documentation/dev-tools/capability-analysis.rst
@@ -81,7 +81,8 @@ Supported Kernel Primitives
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Currently the following synchronization primitives are supported:
-`raw_spinlock_t`, `spinlock_t`, `rwlock_t`, `mutex`, `seqlock_t`.
+`raw_spinlock_t`, `spinlock_t`, `rwlock_t`, `mutex`, `seqlock_t`,
+`bit_spinlock`.
For capabilities with an initialization function (e.g., `spin_lock_init()`),
calling this function on the capability instance before initializing any
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h
index 59e345f74b0e..ba3a67f39f0c 100644
--- a/include/linux/bit_spinlock.h
+++ b/include/linux/bit_spinlock.h
@@ -9,6 +9,16 @@
#include <asm/processor.h> /* for cpu_relax() */
+/*
+ * For static capability analysis, we need a unique token for each possible bit
+ * that can be used as a bit_spinlock. The easiest way to do that is to create a
+ * fake capability that we can cast to with the __bitlock(bitnum, addr) macro
+ * below, which will give us unique instances for each (bit, addr) pair that the
+ * static analysis can use.
+ */
+struct_with_capability(__capability_bitlock) { };
+#define __bitlock(bitnum, addr) (struct __capability_bitlock *)(bitnum + (addr))
+
/*
* bit-based spin_lock()
*
@@ -16,6 +26,7 @@
* are significantly faster.
*/
static __always_inline void bit_spin_lock(int bitnum, unsigned long *addr)
+ __acquires(__bitlock(bitnum, addr))
{
/*
* Assuming the lock is uncontended, this never enters
@@ -34,13 +45,14 @@ static __always_inline void bit_spin_lock(int bitnum, unsigned long *addr)
preempt_disable();
}
#endif
- __acquire(bitlock);
+ __acquire(__bitlock(bitnum, addr));
}
/*
* Return true if it was acquired
*/
static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr)
+ __cond_acquires(true, __bitlock(bitnum, addr))
{
preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
@@ -49,7 +61,7 @@ static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr)
return 0;
}
#endif
- __acquire(bitlock);
+ __acquire(__bitlock(bitnum, addr));
return 1;
}
@@ -57,6 +69,7 @@ static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr)
* bit-based spin_unlock()
*/
static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr)
+ __releases(__bitlock(bitnum, addr))
{
#ifdef CONFIG_DEBUG_SPINLOCK
BUG_ON(!test_bit(bitnum, addr));
@@ -65,7 +78,7 @@ static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr)
clear_bit_unlock(bitnum, addr);
#endif
preempt_enable();
- __release(bitlock);
+ __release(__bitlock(bitnum, addr));
}
/*
@@ -74,6 +87,7 @@ static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr)
* protecting the rest of the flags in the word.
*/
static __always_inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
+ __releases(__bitlock(bitnum, addr))
{
#ifdef CONFIG_DEBUG_SPINLOCK
BUG_ON(!test_bit(bitnum, addr));
@@ -82,7 +96,7 @@ static __always_inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
__clear_bit_unlock(bitnum, addr);
#endif
preempt_enable();
- __release(bitlock);
+ __release(__bitlock(bitnum, addr));
}
/*
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
index ae1b541446c9..df9eebe6afca 100644
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -144,11 +144,13 @@ static inline void hlist_bl_del_init(struct hlist_bl_node *n)
}
static inline void hlist_bl_lock(struct hlist_bl_head *b)
+ __acquires(__bitlock(0, b))
{
bit_spin_lock(0, (unsigned long *)b);
}
static inline void hlist_bl_unlock(struct hlist_bl_head *b)
+ __releases(__bitlock(0, b))
{
__bit_spin_unlock(0, (unsigned long *)b);
}
diff --git a/lib/test_capability-analysis.c b/lib/test_capability-analysis.c
index 74d287740bb8..ad362d5a7916 100644
--- a/lib/test_capability-analysis.c
+++ b/lib/test_capability-analysis.c
@@ -4,6 +4,7 @@
* positive errors when compiled with Clang's capability analysis.
*/
+#include <linux/bit_spinlock.h>
#include <linux/build_bug.h>
#include <linux/mutex.h>
#include <linux/seqlock.h>
@@ -251,3 +252,28 @@ static void __used test_seqlock_writer(struct test_seqlock_data *d)
d->counter++;
write_sequnlock_irqrestore(&d->sl, flags);
}
+
+struct test_bit_spinlock_data {
+ unsigned long bits;
+ int counter __guarded_by(__bitlock(3, &bits));
+};
+
+static void __used test_bit_spin_lock(struct test_bit_spinlock_data *d)
+{
+ /*
+ * Note, the analysis seems to have false negatives, because it won't
+ * precisely recognize the bit of the fake __bitlock() token.
+ */
+ bit_spin_lock(3, &d->bits);
+ d->counter++;
+ bit_spin_unlock(3, &d->bits);
+
+ bit_spin_lock(3, &d->bits);
+ d->counter++;
+ __bit_spin_unlock(3, &d->bits);
+
+ if (bit_spin_trylock(3, &d->bits)) {
+ d->counter++;
+ bit_spin_unlock(3, &d->bits);
+ }
+}
--
2.51.0.384.g4c02a37b29-goog