Message-ID: <176762845418.510.953963495171177178.tip-bot2@tip-bot2>
Date: Mon, 05 Jan 2026 15:54:14 -0000
From: "tip-bot2 for Marco Elver" <tip-bot2@...utronix.de>
To: linux-tip-commits@...r.kernel.org
Cc: Marco Elver <elver@...gle.com>,
"Peter Zijlstra (Intel)" <peterz@...radead.org>, x86@...nel.org,
linux-kernel@...r.kernel.org
Subject: [tip: locking/core] rhashtable: Enable context analysis

The following commit has been merged into the locking/core branch of tip:

Commit-ID: 322366b8f13a8cafe169dc1dc6f6ec0d82ff8734
Gitweb: https://git.kernel.org/tip/322366b8f13a8cafe169dc1dc6f6ec0d82ff8734
Author: Marco Elver <elver@...gle.com>
AuthorDate: Fri, 19 Dec 2025 16:40:21 +01:00
Committer: Peter Zijlstra <peterz@...radead.org>
CommitterDate: Mon, 05 Jan 2026 16:43:35 +01:00

rhashtable: Enable context analysis

Enable context analysis for rhashtable, which served as an initial test
case because it combines RCU, mutex, and bit_spinlock usage. Users of
rhashtable also benefit from the annotations on its API: the analysis
now warns when the RCU read lock is not held where required (see the
illustrative sketch after the diffstat below).

Signed-off-by: Marco Elver <elver@...gle.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Link: https://patch.msgid.link/20251219154418.3592607-33-elver@google.com
---
include/linux/rhashtable.h | 16 +++++++++++++---
lib/Makefile | 2 ++
lib/rhashtable.c | 5 +++--
3 files changed, 18 insertions(+), 5 deletions(-)
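
For illustration only, a minimal sketch (hypothetical callers, not part
of this patch) of what the new __must_hold_shared(RCU) annotation on the
lookup API catches once context analysis is enabled:

	#include <linux/rcupdate.h>
	#include <linux/rhashtable.h>

	/*
	 * Hypothetical callers, not from this patch: rhashtable_lookup()
	 * is now annotated __must_hold_shared(RCU), so the unguarded call
	 * below is flagged at compile time when context analysis is on.
	 */
	static bool key_present_broken(struct rhashtable *ht, const void *key,
				       const struct rhashtable_params params)
	{
		/* Warning: RCU read lock is not held around the lookup. */
		return rhashtable_lookup(ht, key, params) != NULL;
	}

	static bool key_present(struct rhashtable *ht, const void *key,
				const struct rhashtable_params params)
	{
		bool found;

		rcu_read_lock();	/* enter the shared RCU context */
		found = rhashtable_lookup(ht, key, params) != NULL;
		rcu_read_unlock();	/* leave it; the analysis is satisfied */
		return found;
	}
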
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 08e664b..133ccb3 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -245,16 +245,17 @@ void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
void rhashtable_walk_enter(struct rhashtable *ht,
struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
-int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);
+int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires_shared(RCU);

static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
+ __acquires_shared(RCU)
{
(void)rhashtable_walk_start_check(iter);
}

void *rhashtable_walk_next(struct rhashtable_iter *iter);
void *rhashtable_walk_peek(struct rhashtable_iter *iter);
-void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
+void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases_shared(RCU);

void rhashtable_free_and_destroy(struct rhashtable *ht,
void (*free_fn)(void *ptr, void *arg),
@@ -325,6 +326,7 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(

static inline unsigned long rht_lock(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bkt)
+ __acquires(__bitlock(0, bkt))
{
unsigned long flags;

@@ -337,6 +339,7 @@ static inline unsigned long rht_lock(struct bucket_table *tbl,
static inline unsigned long rht_lock_nested(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bucket,
unsigned int subclass)
+ __acquires(__bitlock(0, bucket))
{
unsigned long flags;

@@ -349,6 +352,7 @@ static inline unsigned long rht_lock_nested(struct bucket_table *tbl,
static inline void rht_unlock(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bkt,
unsigned long flags)
+ __releases(__bitlock(0, bkt))
{
lock_map_release(&tbl->dep_map);
bit_spin_unlock(0, (unsigned long *)bkt);
@@ -424,13 +428,14 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bkt,
struct rhash_head *obj,
unsigned long flags)
+ __releases(__bitlock(0, bkt))
{
if (rht_is_a_nulls(obj))
obj = NULL;
lock_map_release(&tbl->dep_map);
rcu_assign_pointer(*bkt, (void *)obj);
preempt_enable();
- __release(bitlock);
+ __release(__bitlock(0, bkt));
local_irq_restore(flags);
}

@@ -612,6 +617,7 @@ static __always_inline struct rhash_head *__rhashtable_lookup(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params,
const enum rht_lookup_freq freq)
+ __must_hold_shared(RCU)
{
struct rhashtable_compare_arg arg = {
.ht = ht,
@@ -666,6 +672,7 @@ restart:
static __always_inline void *rhashtable_lookup(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
+ __must_hold_shared(RCU)
{
struct rhash_head *he = __rhashtable_lookup(ht, key, params,
RHT_LOOKUP_NORMAL);
@@ -676,6 +683,7 @@ static __always_inline void *rhashtable_lookup(
static __always_inline void *rhashtable_lookup_likely(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
+ __must_hold_shared(RCU)
{
struct rhash_head *he = __rhashtable_lookup(ht, key, params,
RHT_LOOKUP_LIKELY);
@@ -727,6 +735,7 @@ static __always_inline void *rhashtable_lookup_fast(
static __always_inline struct rhlist_head *rhltable_lookup(
struct rhltable *hlt, const void *key,
const struct rhashtable_params params)
+ __must_hold_shared(RCU)
{
struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
RHT_LOOKUP_NORMAL);
@@ -737,6 +746,7 @@ static __always_inline struct rhlist_head *rhltable_lookup(
static __always_inline struct rhlist_head *rhltable_lookup_likely(
struct rhltable *hlt, const void *key,
const struct rhashtable_params params)
+ __must_hold_shared(RCU)
{
struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
RHT_LOOKUP_LIKELY);
diff --git a/lib/Makefile b/lib/Makefile
index e755eee..22d8742 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -50,6 +50,8 @@ lib-$(CONFIG_MIN_HEAP) += min_heap.o
lib-y += kobject.o klist.o
obj-y += lockref.o
+CONTEXT_ANALYSIS_rhashtable.o := y
+

obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \
bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
list_sort.o uuid.o iov_iter.o clz_ctz.o \
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index fde0f0e..6074ed5 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -358,6 +358,7 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
static int rhashtable_rehash_alloc(struct rhashtable *ht,
struct bucket_table *old_tbl,
unsigned int size)
+ __must_hold(&ht->mutex)
{
struct bucket_table *new_tbl;
int err;
@@ -392,6 +393,7 @@ static int rhashtable_rehash_alloc(struct rhashtable *ht,
* bucket locks or concurrent RCU protected lookups and traversals.
*/
static int rhashtable_shrink(struct rhashtable *ht)
+ __must_hold(&ht->mutex)
{
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
unsigned int nelems = atomic_read(&ht->nelems);
@@ -724,7 +726,7 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
* resize events and always continue.
*/
int rhashtable_walk_start_check(struct rhashtable_iter *iter)
- __acquires(RCU)
+ __acquires_shared(RCU)
{
struct rhashtable *ht = iter->ht;
bool rhlist = ht->rhlist;
@@ -940,7 +942,6 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_peek);
* hash table.
*/
void rhashtable_walk_stop(struct rhashtable_iter *iter)
- __releases(RCU)
{
struct rhashtable *ht;
struct bucket_table *tbl = iter->walker.tbl;
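
For the per-bucket bit spinlock, the __acquires(__bitlock(0, bkt)) and
__releases(__bitlock(0, bkt)) annotations above let the analysis check
that every path balances rht_lock() with rht_unlock() (or
rht_assign_unlock()), just as __must_hold(&ht->mutex) requires callers
of the rehash helpers to hold the hash table mutex. A minimal sketch,
assuming a hypothetical helper built on this patch's annotations:

	#include <linux/rhashtable.h>

	/*
	 * Hypothetical helper, assuming the annotations in this patch: the
	 * analysis checks that the bit lock acquired via rht_lock() is
	 * released on every path before the function returns.
	 */
	static void bucket_update(struct bucket_table *tbl,
				  struct rhash_lock_head __rcu **bkt)
	{
		unsigned long flags;

		flags = rht_lock(tbl, bkt);	/* acquires __bitlock(0, bkt) */
		/* ... modify the bucket chain while holding the bit lock ... */
		rht_unlock(tbl, bkt, flags);	/* releases __bitlock(0, bkt) */
	}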