Message-ID: <20251117191515.2934026-1-ameryhung@gmail.com>
Date: Mon, 17 Nov 2025 11:15:15 -0800
From: Amery Hung <ameryhung@...il.com>
To: bpf@...r.kernel.org
Cc: netdev@...r.kernel.org,
alexei.starovoitov@...il.com,
andrii@...nel.org,
daniel@...earbox.net,
memxor@...il.com,
ameryhung@...il.com,
kernel-team@...a.com
Subject: [PATCH bpf-next v1 1/1] bpf: Annotate rqspinlock lock acquiring functions with __must_check
Locking a resilient queued spinlock can fail when a deadlock or timeout
occurs. Mark the lock acquiring functions with __must_check to ensure
callers always handle the returned error.
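For context, a caller is only allowed to treat the lock as held when the
acquire helper returns 0. A minimal sketch of the expected usage pattern
(update_counter and its counter argument are illustrative only, not part
of this patch):

	/* Illustrative caller: the lock is held only when the acquire
	 * helper returns 0, so bail out on any error before touching
	 * the protected data.
	 */
	static int update_counter(rqspinlock_t *lock, u64 *counter)
	{
		unsigned long flags;
		int ret;

		ret = raw_res_spin_lock_irqsave(lock, flags);
		if (ret)	/* -EDEADLK or -ETIMEDOUT: lock not held */
			return ret;

		(*counter)++;
		raw_res_spin_unlock_irqrestore(lock, flags);
		return 0;
	}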
Suggested-by: Andrii Nakryiko <andrii@...nel.org>
Signed-off-by: Amery Hung <ameryhung@...il.com>
---
include/asm-generic/rqspinlock.h | 47 +++++++++++++++++++-------------
1 file changed, 28 insertions(+), 19 deletions(-)
diff --git a/include/asm-generic/rqspinlock.h b/include/asm-generic/rqspinlock.h
index 6d4244d643df..855c09435506 100644
--- a/include/asm-generic/rqspinlock.h
+++ b/include/asm-generic/rqspinlock.h
@@ -171,7 +171,7 @@ static __always_inline void release_held_lock_entry(void)
* * -EDEADLK - Lock acquisition failed because of AA/ABBA deadlock.
* * -ETIMEDOUT - Lock acquisition failed because of timeout.
*/
-static __always_inline int res_spin_lock(rqspinlock_t *lock)
+static __always_inline __must_check int res_spin_lock(rqspinlock_t *lock)
{
int val = 0;
@@ -223,27 +223,36 @@ static __always_inline void res_spin_unlock(rqspinlock_t *lock)
#define raw_res_spin_lock_init(lock) ({ *(lock) = (rqspinlock_t){0}; })
#endif
-#define raw_res_spin_lock(lock) \
- ({ \
- int __ret; \
- preempt_disable(); \
- __ret = res_spin_lock(lock); \
- if (__ret) \
- preempt_enable(); \
- __ret; \
- })
+static __always_inline __must_check int raw_res_spin_lock(rqspinlock_t *lock)
+{
+ int ret;
+
+ preempt_disable();
+ ret = res_spin_lock(lock);
+ if (ret)
+ preempt_enable();
+
+ return ret;
+}
#define raw_res_spin_unlock(lock) ({ res_spin_unlock(lock); preempt_enable(); })
-#define raw_res_spin_lock_irqsave(lock, flags) \
- ({ \
- int __ret; \
- local_irq_save(flags); \
- __ret = raw_res_spin_lock(lock); \
- if (__ret) \
- local_irq_restore(flags); \
- __ret; \
- })
+static __always_inline __must_check int
+__raw_res_spin_lock_irqsave(rqspinlock_t *lock, unsigned long *flags)
+{
+ unsigned long __flags;
+ int ret;
+
+ local_irq_save(__flags);
+ ret = raw_res_spin_lock(lock);
+ if (ret)
+ local_irq_restore(__flags);
+
+ *flags = __flags;
+ return ret;
+}
+
+#define raw_res_spin_lock_irqsave(lock, flags) __raw_res_spin_lock_irqsave(lock, &flags)
#define raw_res_spin_unlock_irqrestore(lock, flags) ({ raw_res_spin_unlock(lock); local_irq_restore(flags); })
--
2.47.3