Message-ID: <b2e3f866-3644-4730-b2d3-6b84b1433a9d@redhat.com>
Date: Thu, 11 Jan 2024 23:52:10 -0500
From: Waiman Long <longman@...hat.com>
To: "Matthew Wilcox (Oracle)" <willy@...radead.org>,
Chandan Babu R <chandan.babu@...cle.com>
Cc: linux-kernel@...r.kernel.org, linux-xfs@...r.kernel.org,
"Darrick J . Wong" <djwong@...nel.org>, Mateusz Guzik <mjguzik@...il.com>,
Peter Zijlstra <peterz@...radead.org>, Ingo Molnar <mingo@...hat.com>,
Will Deacon <will@...nel.org>
Subject: Re: [PATCH v5 1/3] locking: Add rwsem_assert_held() and
rwsem_assert_held_write()
On 1/11/24 16:24, Matthew Wilcox (Oracle) wrote:
> Modelled after lockdep_assert_held() and lockdep_assert_held_write(),
> but are always active, even when lockdep is disabled. Of course, they
> don't test that _this_ thread is the owner, but it's sufficient to catch
> many bugs and doesn't incur the same performance penalty as lockdep.
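As a usage sketch (a hypothetical caller, not taken from this series), the
point is that code which requires the lock can now assert it cheaply even in
production builds:

	static void frob_dev_state(struct my_dev *dev)
	{
		/* Warn if nobody holds dev->rwsem for writing. */
		rwsem_assert_held_write(&dev->rwsem);
		dev->nr_frobs++;
	}
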
I don't mind the new *assert_held*nolockdep APIs. The only nit that I
have is that their behavior differs slightly from that of the corresponding
lockdep counterparts: they only check that the lock is held by someone, not
that the current task is the holder (see the sketch below). So we may need
a comment to document the difference and set the right expectation. Of
course, that can be done in a follow-up patch.
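To make the difference concrete (a sketch; "sem" is a hypothetical rwsem):

	/* thread A */
	down_read(&sem);

	/* thread B -- not a holder of sem */
	rwsem_assert_held_nolockdep(&sem); /* no warning: count != 0 */
	lockdep_assert_held(&sem);         /* splats with lockdep on, since
	                                      the current task isn't a holder */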
Acked-by: Waiman Long <longman@...hat.com>
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@...radead.org>
> Acked-by: Peter Zijlstra (Intel) <peterz@...radead.org>
> ---
> include/linux/rwbase_rt.h | 9 ++++++--
> include/linux/rwsem.h | 46 ++++++++++++++++++++++++++++++++++-----
> 2 files changed, 48 insertions(+), 7 deletions(-)
>
> diff --git a/include/linux/rwbase_rt.h b/include/linux/rwbase_rt.h
> index 1d264dd08625..29c4e4f243e4 100644
> --- a/include/linux/rwbase_rt.h
> +++ b/include/linux/rwbase_rt.h
> @@ -26,12 +26,17 @@ struct rwbase_rt {
> } while (0)
>
>
> -static __always_inline bool rw_base_is_locked(struct rwbase_rt *rwb)
> +static __always_inline bool rw_base_is_locked(const struct rwbase_rt *rwb)
> {
> return atomic_read(&rwb->readers) != READER_BIAS;
> }
>
> -static __always_inline bool rw_base_is_contended(struct rwbase_rt *rwb)
> +static inline void rw_base_assert_held_write(const struct rwbase_rt *rwb)
> +{
> + WARN_ON(atomic_read(&rwb->readers) != WRITER_BIAS);
> +}
> +
> +static __always_inline bool rw_base_is_contended(const struct rwbase_rt *rwb)
> {
> return atomic_read(&rwb->readers) > 0;
> }
> diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
> index 9c29689ff505..4f1c18992f76 100644
> --- a/include/linux/rwsem.h
> +++ b/include/linux/rwsem.h
> @@ -66,14 +66,24 @@ struct rw_semaphore {
> #endif
> };
>
> -/* In all implementations count != 0 means locked */
> +#define RWSEM_UNLOCKED_VALUE 0UL
> +#define RWSEM_WRITER_LOCKED (1UL << 0)
> +#define __RWSEM_COUNT_INIT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
> +
> static inline int rwsem_is_locked(struct rw_semaphore *sem)
> {
> - return atomic_long_read(&sem->count) != 0;
> + return atomic_long_read(&sem->count) != RWSEM_UNLOCKED_VALUE;
> }
>
> -#define RWSEM_UNLOCKED_VALUE 0L
> -#define __RWSEM_COUNT_INIT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
> +static inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
> +{
> + WARN_ON(atomic_long_read(&sem->count) == RWSEM_UNLOCKED_VALUE);
> +}
> +
> +static inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
> +{
> + WARN_ON(!(atomic_long_read(&sem->count) & RWSEM_WRITER_LOCKED));
> +}
>
> /* Common initializer macros and functions */
>
> @@ -152,11 +162,21 @@ do { \
> __init_rwsem((sem), #sem, &__key); \
> } while (0)
>
> -static __always_inline int rwsem_is_locked(struct rw_semaphore *sem)
> +static __always_inline int rwsem_is_locked(const struct rw_semaphore *sem)
> {
> return rw_base_is_locked(&sem->rwbase);
> }
>
> +static inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
> +{
> + WARN_ON(!rwsem_is_locked(sem));
> +}
> +
> +static inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
> +{
> + rw_base_assert_held_write(sem);
> +}
> +
> static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
> {
> return rw_base_is_contended(&sem->rwbase);
> @@ -169,6 +189,22 @@ static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
> * the RT specific variant.
> */
>
> +static inline void rwsem_assert_held(const struct rw_semaphore *sem)
> +{
> + if (IS_ENABLED(CONFIG_LOCKDEP))
> + lockdep_assert_held(sem);
> + else
> + rwsem_assert_held_nolockdep(sem);
> +}
> +
> +static inline void rwsem_assert_held_write(const struct rw_semaphore *sem)
> +{
> + if (IS_ENABLED(CONFIG_LOCKDEP))
> + lockdep_assert_held_write(sem);
> + else
> + rwsem_assert_held_write_nolockdep(sem);
> +}
> +
> /*
> * lock for reading
> */