Message-ID: <CAP01T74CcZqt9W8Y5T3NYheU8HyGataKXFw99cnLC46ZV9oFPQ@mail.gmail.com>
Date: Tue, 18 Nov 2025 05:16:50 -0500
From: Kumar Kartikeya Dwivedi <memxor@...il.com>
To: Amery Hung <ameryhung@...il.com>
Cc: bpf@...r.kernel.org, netdev@...r.kernel.org, alexei.starovoitov@...il.com,
andrii@...nel.org, daniel@...earbox.net, kernel-team@...a.com
Subject: Re: [PATCH bpf-next v1 1/1] bpf: Annotate rqspinlock lock acquiring
functions with __must_check
On Mon, 17 Nov 2025 at 14:15, Amery Hung <ameryhung@...il.com> wrote:
>
> Locking a resilient queued spinlock can fail when a deadlock or timeout
> occurs. Mark the lock acquiring functions with __must_check to make sure
> callers always handle the returned error.
>
> Suggested-by: Andrii Nakryiko <andrii@...nel.org>
> Signed-off-by: Amery Hung <ameryhung@...il.com>
> ---
Looks like it's working :)

I would just explicitly ignore the locktorture case with a (void) cast.
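A minimal sketch of that pattern (the call site below is hypothetical and
only illustrates the explicit discard, it is not the actual locktorture
code):

	/*
	 * The torture test deliberately does not act on acquisition
	 * failure, so discard the __must_check return value with an
	 * explicit (void) cast instead of dropping it silently.
	 */
	(void)raw_res_spin_lock(&test_lock);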
After that is fixed, you can add:

Acked-by: Kumar Kartikeya Dwivedi <memxor@...il.com>

Thanks!
> include/asm-generic/rqspinlock.h | 47 +++++++++++++++++++-------------
> 1 file changed, 28 insertions(+), 19 deletions(-)
>
> diff --git a/include/asm-generic/rqspinlock.h b/include/asm-generic/rqspinlock.h
> index 6d4244d643df..855c09435506 100644
> --- a/include/asm-generic/rqspinlock.h
> +++ b/include/asm-generic/rqspinlock.h
> @@ -171,7 +171,7 @@ static __always_inline void release_held_lock_entry(void)
> * * -EDEADLK - Lock acquisition failed because of AA/ABBA deadlock.
> * * -ETIMEDOUT - Lock acquisition failed because of timeout.
> */
> -static __always_inline int res_spin_lock(rqspinlock_t *lock)
> +static __always_inline __must_check int res_spin_lock(rqspinlock_t *lock)
> {
> int val = 0;
>
> @@ -223,27 +223,36 @@ static __always_inline void res_spin_unlock(rqspinlock_t *lock)
> #define raw_res_spin_lock_init(lock) ({ *(lock) = (rqspinlock_t){0}; })
> #endif
>
> -#define raw_res_spin_lock(lock) \
> - ({ \
> - int __ret; \
> - preempt_disable(); \
> - __ret = res_spin_lock(lock); \
> - if (__ret) \
> - preempt_enable(); \
> - __ret; \
> - })
> +static __always_inline __must_check int raw_res_spin_lock(rqspinlock_t *lock)
> +{
> + int ret;
> +
> + preempt_disable();
> + ret = res_spin_lock(lock);
> + if (ret)
> + preempt_enable();
> +
> + return ret;
> +}
>
> #define raw_res_spin_unlock(lock) ({ res_spin_unlock(lock); preempt_enable(); })
>
> -#define raw_res_spin_lock_irqsave(lock, flags) \
> - ({ \
> - int __ret; \
> - local_irq_save(flags); \
> - __ret = raw_res_spin_lock(lock); \
> - if (__ret) \
> - local_irq_restore(flags); \
> - __ret; \
> - })
> +static __always_inline __must_check int
> +__raw_res_spin_lock_irqsave(rqspinlock_t *lock, unsigned long *flags)
> +{
> + unsigned long __flags;
> + int ret;
> +
> + local_irq_save(__flags);
> + ret = raw_res_spin_lock(lock);
> + if (ret)
> + local_irq_restore(__flags);
> +
> + *flags = __flags;
> + return ret;
> +}
> +
> +#define raw_res_spin_lock_irqsave(lock, flags) __raw_res_spin_lock_irqsave(lock, &flags)
>
> #define raw_res_spin_unlock_irqrestore(lock, flags) ({ raw_res_spin_unlock(lock); local_irq_restore(flags); })
>
> --
> 2.47.3
>
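For completeness, a sketch of how callers are expected to consume the now
__must_check return value (illustrative only, not taken from the patch;
the function name is made up):

	#include <asm-generic/rqspinlock.h>

	/*
	 * On failure the lock is not held, preemption has been re-enabled
	 * and the IRQ flags restored, so the caller only needs to
	 * propagate the error.
	 */
	static int update_protected_state(rqspinlock_t *lock)
	{
		unsigned long flags;
		int ret;

		ret = raw_res_spin_lock_irqsave(lock, flags);
		if (ret)
			return ret;	/* -EDEADLK or -ETIMEDOUT */

		/* ... critical section ... */

		raw_res_spin_unlock_irqrestore(lock, flags);
		return 0;
	}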