[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <aNUW1ZRt_pcR8eV7@gpd4>
Date: Thu, 25 Sep 2025 12:17:57 +0200
From: Andrea Righi <arighi@...dia.com>
To: Herbert Xu <herbert@...dor.apana.org.au>
Cc: "Paul E. McKenney" <paulmck@...nel.org>,
Masami Hiramatsu <mhiramat@...nel.org>,
Steven Rostedt <rostedt@...dmis.org>,
Menglong Dong <dongml2@...natelecom.cn>,
mathieu.desnoyers@...icios.com, linux-kernel@...r.kernel.org,
linux-trace-kernel@...r.kernel.org,
kernel test robot <oliver.sang@...el.com>, tgraf@...g.ch,
linux-crypto@...r.kernel.org
Subject: Re: [v2 PATCH] rhashtable: Use rcu_dereference_all and
rcu_dereference_all_check
Hi Herbert,
On Tue, Sep 09, 2025 at 05:50:56PM +0800, Herbert Xu wrote:
> On Mon, Sep 08, 2025 at 08:23:27AM -0700, Paul E. McKenney wrote:
> >
> > I am guessing that you want to send this up via the rhashtable path.
>
> Yes I could push that along.
>
> > * This is similar to rcu_dereference_check(), but allows protection
> > * by all forms of vanilla RCU readers, including preemption disabled,
> > * bh-disabled, and interrupt-disabled regions of code. Note that "vanilla
> > * RCU" excludes SRCU and the various Tasks RCU flavors. Please note
> > * that this macro should not be backported to any Linux-kernel version
> > * preceding v5.0 due to changes in synchronize_rcu() semantics prior
> > * to that version.
> >
> > The "should not" vs. "can not" accounts for the possibility of people
> > using synchronize_rcu_mult(), but someone wanting to do that best know
> > what they are doing. ;-)
>
> Thanks! I've incorporated that into the patch:
>
> ---8<---
> Add rcu_dereference_all and rcu_dereference_all_check so that
> library code such as rhashtable can be used with any RCU variant.
>
> As it stands rcu_dereference is used within rhashtable, which
> creates false-positive warnings if the user calls it from another
> RCU context, such as preempt_disable().
>
> Use the rcu_dereference_all and rcu_dereference_all_check calls
> in rhashtable to suppress these warnings.
>
> Also replace the rcu_dereference_raw calls in the list iterators
> with rcu_dereference_all to uncover buggy calls.
>
> Reported-by: Menglong Dong <dongml2@...natelecom.cn>
> Signed-off-by: Herbert Xu <herbert@...dor.apana.org.au>
> Reviewed-by: Paul E. McKenney <paulmck@...nel.org>
We hit the same issue in sched_ext and with this applied lockdep seems
happy. FWIW:
Tested-by: Andrea Righi <arighi@...dia.com>
Thanks,
-Andrea
>
> diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
> index 120536f4c6eb..448eb1f0cb48 100644
> --- a/include/linux/rcupdate.h
> +++ b/include/linux/rcupdate.h
> @@ -713,6 +713,24 @@ do { \
> (c) || rcu_read_lock_sched_held(), \
> __rcu)
>
> +/**
> + * rcu_dereference_all_check() - rcu_dereference_all with debug checking
> + * @p: The pointer to read, prior to dereferencing
> + * @c: The conditions under which the dereference will take place
> + *
> + * This is similar to rcu_dereference_check(), but allows protection
> + * by all forms of vanilla RCU readers, including preemption disabled,
> + * bh-disabled, and interrupt-disabled regions of code. Note that "vanilla
> + * RCU" excludes SRCU and the various Tasks RCU flavors. Please note
> + * that this macro should not be backported to any Linux-kernel version
> + * preceding v5.0 due to changes in synchronize_rcu() semantics prior
> + * to that version.
> + */
> +#define rcu_dereference_all_check(p, c) \
> + __rcu_dereference_check((p), __UNIQUE_ID(rcu), \
> + (c) || rcu_read_lock_any_held(), \
> + __rcu)
> +
> /*
> * The tracing infrastructure traces RCU (we want that), but unfortunately
> * some of the RCU checks causes tracing to lock up the system.
> @@ -767,6 +785,14 @@ do { \
> */
> #define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)
>
> +/**
> + * rcu_dereference_all() - fetch RCU-all-protected pointer for dereferencing
> + * @p: The pointer to read, prior to dereferencing
> + *
> + * Makes rcu_dereference_check() do the dirty work.
> + */
> +#define rcu_dereference_all(p) rcu_dereference_all_check(p, 0)
> +
> /**
> * rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism
> * @p: The pointer to hand off
> diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
> index e740157f3cd7..05a221ce79a6 100644
> --- a/include/linux/rhashtable.h
> +++ b/include/linux/rhashtable.h
> @@ -272,13 +272,13 @@ struct rhash_lock_head __rcu **rht_bucket_nested_insert(
> rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
>
> #define rht_dereference_rcu(p, ht) \
> - rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))
> + rcu_dereference_all_check(p, lockdep_rht_mutex_is_held(ht))
>
> #define rht_dereference_bucket(p, tbl, hash) \
> rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))
>
> #define rht_dereference_bucket_rcu(p, tbl, hash) \
> - rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))
> + rcu_dereference_all_check(p, lockdep_rht_bucket_is_held(tbl, hash))
>
> #define rht_entry(tpos, pos, member) \
> ({ tpos = container_of(pos, typeof(*tpos), member); 1; })
> @@ -373,7 +373,7 @@ static inline struct rhash_head *__rht_ptr(
> static inline struct rhash_head *rht_ptr_rcu(
> struct rhash_lock_head __rcu *const *bkt)
> {
> - return __rht_ptr(rcu_dereference(*bkt), bkt);
> + return __rht_ptr(rcu_dereference_all(*bkt), bkt);
> }
>
> static inline struct rhash_head *rht_ptr(
> @@ -497,7 +497,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
> for (({barrier(); }), \
> pos = head; \
> !rht_is_a_nulls(pos); \
> - pos = rcu_dereference_raw(pos->next))
> + pos = rcu_dereference_all(pos->next))
>
> /**
> * rht_for_each_rcu - iterate over rcu hash chain
> @@ -513,7 +513,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
> for (({barrier(); }), \
> pos = rht_ptr_rcu(rht_bucket(tbl, hash)); \
> !rht_is_a_nulls(pos); \
> - pos = rcu_dereference_raw(pos->next))
> + pos = rcu_dereference_all(pos->next))
>
> /**
> * rht_for_each_entry_rcu_from - iterated over rcu hash chain from given head
> @@ -560,7 +560,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
> * list returned by rhltable_lookup.
> */
> #define rhl_for_each_rcu(pos, list) \
> - for (pos = list; pos; pos = rcu_dereference_raw(pos->next))
> + for (pos = list; pos; pos = rcu_dereference_all(pos->next))
>
> /**
> * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
> @@ -574,7 +574,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
> */
> #define rhl_for_each_entry_rcu(tpos, pos, list, member) \
> for (pos = list; pos && rht_entry(tpos, pos, member); \
> - pos = rcu_dereference_raw(pos->next))
> + pos = rcu_dereference_all(pos->next))
>
> static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
> const void *obj)
> --
> Email: Herbert Xu <herbert@...dor.apana.org.au>
> Home Page: http://gondor.apana.org.au/~herbert/
> PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
Powered by blists - more mailing lists