Message-Id: <20250829080735.3598416-6-ankur.a.arora@oracle.com>
Date: Fri, 29 Aug 2025 01:07:35 -0700
From: Ankur Arora <ankur.a.arora@...cle.com>
To: linux-kernel@...r.kernel.org, linux-arch@...r.kernel.org,
linux-arm-kernel@...ts.infradead.org, bpf@...r.kernel.org
Cc: arnd@...db.de, catalin.marinas@....com, will@...nel.org,
peterz@...radead.org, akpm@...ux-foundation.org, mark.rutland@....com,
harisokn@...zon.com, cl@...two.org, ast@...nel.org, memxor@...il.com,
zhenglifeng1@...wei.com, xueshuai@...ux.alibaba.com,
joao.m.martins@...cle.com, boris.ostrovsky@...cle.com,
konrad.wilk@...cle.com
Subject: [PATCH v4 5/5] rqspinlock: use smp_cond_load_acquire_timewait()

Use smp_cond_load_acquire_timewait() to define
res_atomic_cond_read_acquire() and res_smp_cond_load_acquire_timewait().
The timeout check for both is done via RES_CHECK_TIMEOUT().

Also define res_smp_cond_load_acquire_waiting(), which tells
RES_CHECK_TIMEOUT() whether the architecture waits in
smp_cond_load_acquire_timewait(). Only spin-wait implementations need
to amortize the timeout check across spins; waiting implementations
wake up periodically and can afford to check on every iteration.
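
For illustration, here is a minimal userspace sketch of that
amortization pattern (a simplification, not the kernel code; now_ns()
stands in for the clock read done by check_timeout()):

#include <errno.h>
#include <time.h>

struct timeout_state {
	unsigned long long end_ns;	/* absolute deadline */
	unsigned short spin;		/* wraps to 0 every 2^16 increments */
	unsigned char wait;		/* backed by a waiting primitive? */
};

static unsigned long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* Mirrors RES_CHECK_TIMEOUT(): read the clock on every call when
 * waiting, but only once per counter wrap when spinning. */
static int check_timeout_amortized(struct timeout_state *ts)
{
	if (ts->wait || !ts->spin++)
		return now_ns() >= ts->end_ns ? -ETIMEDOUT : 0;
	return 0;
}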
Cc: Kumar Kartikeya Dwivedi <memxor@...il.com>
Cc: Alexei Starovoitov <ast@...nel.org>
Signed-off-by: Ankur Arora <ankur.a.arora@...cle.com>
---
arch/arm64/include/asm/rqspinlock.h | 3 +++
include/asm-generic/rqspinlock.h | 4 ++++
kernel/bpf/rqspinlock.c | 25 +++++++++----------------
3 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/arch/arm64/include/asm/rqspinlock.h b/arch/arm64/include/asm/rqspinlock.h
index a385603436e9..ce8feadeb9a9 100644
--- a/arch/arm64/include/asm/rqspinlock.h
+++ b/arch/arm64/include/asm/rqspinlock.h
@@ -3,6 +3,9 @@
#define _ASM_RQSPINLOCK_H
#include <asm/barrier.h>
+
+#define res_smp_cond_load_acquire_waiting() arch_timer_evtstrm_available()
+
#include <asm-generic/rqspinlock.h>
#endif /* _ASM_RQSPINLOCK_H */
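
On arm64, res_smp_cond_load_acquire_waiting() is true whenever the
timer event stream is available: a WFE-based wait then wakes up at
least once per event-stream period (typically ~100us), so every wakeup
can run the timeout check directly. An illustrative sketch of that
shape (simplified by hand, not the smp_cond_load_acquire_timewait()
implementation):

static inline unsigned int wait_until_clear(volatile unsigned int *lockword,
					    int (*timed_out)(void))
{
	unsigned int val;

	while ((val = *lockword) != 0) {
		if (timed_out())	/* cheap at event-stream rate */
			break;
		/* Sleeps until the next event, e.g. an event-stream tick. */
		asm volatile("wfe" ::: "memory");
	}
	return val;
}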
diff --git a/include/asm-generic/rqspinlock.h b/include/asm-generic/rqspinlock.h
index 6d4244d643df..4b49c0ddf89a 100644
--- a/include/asm-generic/rqspinlock.h
+++ b/include/asm-generic/rqspinlock.h
@@ -247,4 +247,8 @@ static __always_inline void res_spin_unlock(rqspinlock_t *lock)
#define raw_res_spin_unlock_irqrestore(lock, flags) ({ raw_res_spin_unlock(lock); local_irq_restore(flags); })
+#ifndef res_smp_cond_load_acquire_waiting
+#define res_smp_cond_load_acquire_waiting() 0
+#endif
+
#endif /* __ASM_GENERIC_RQSPINLOCK_H */
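
Architectures without a periodic-wakeup primitive get the 0 default
above and keep the spin-amortized check. A hypothetical architecture
would opt in the same way arm64 does, e.g.
(foo_periodic_wakeup_available() is made up for illustration):

/* arch/foo/include/asm/rqspinlock.h (hypothetical) */
#include <asm/barrier.h>

/* Must precede the generic include so its #ifndef fallback is skipped. */
#define res_smp_cond_load_acquire_waiting()	foo_periodic_wakeup_available()

#include <asm-generic/rqspinlock.h>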
diff --git a/kernel/bpf/rqspinlock.c b/kernel/bpf/rqspinlock.c
index 5ab354d55d82..8de1395422e8 100644
--- a/kernel/bpf/rqspinlock.c
+++ b/kernel/bpf/rqspinlock.c
@@ -82,6 +82,7 @@ struct rqspinlock_timeout {
u64 duration;
u64 cur;
u16 spin;
+ u8 wait;
};
#define RES_TIMEOUT_VAL 2
@@ -241,26 +242,20 @@ static noinline int check_timeout(rqspinlock_t *lock, u32 mask,
}
/*
- * Do not amortize with spins when res_smp_cond_load_acquire is defined,
- * as the macro does internal amortization for us.
+ * Only amortize with spins when we don't have a waiting implementation.
*/
-#ifndef res_smp_cond_load_acquire
#define RES_CHECK_TIMEOUT(ts, ret, mask) \
({ \
- if (!(ts).spin++) \
+ if ((ts).wait || !(ts).spin++) \
(ret) = check_timeout((lock), (mask), &(ts)); \
(ret); \
})
-#else
-#define RES_CHECK_TIMEOUT(ts, ret, mask) \
- ({ (ret) = check_timeout((lock), (mask), &(ts)); })
-#endif
/*
* Initialize the 'spin' member.
* Set spin member to 0 to trigger AA/ABBA checks immediately.
*/
-#define RES_INIT_TIMEOUT(ts) ({ (ts).spin = 0; })
+#define RES_INIT_TIMEOUT(ts) ({ (ts).spin = 0; (ts).wait = res_smp_cond_load_acquire_waiting(); })
/*
* We only need to reset 'timeout_end', 'spin' will just wrap around as necessary.
@@ -313,11 +308,8 @@ EXPORT_SYMBOL_GPL(resilient_tas_spin_lock);
*/
static DEFINE_PER_CPU_ALIGNED(struct qnode, rqnodes[_Q_MAX_NODES]);
-#ifndef res_smp_cond_load_acquire
-#define res_smp_cond_load_acquire(v, c) smp_cond_load_acquire(v, c)
-#endif
-
-#define res_atomic_cond_read_acquire(v, c) res_smp_cond_load_acquire(&(v)->counter, (c))
+#define res_atomic_cond_read_acquire(v, c, t) smp_cond_load_acquire_timewait(&(v)->counter, (c), (t))
+#define res_smp_cond_load_acquire_timewait(v, c, t) smp_cond_load_acquire_timewait(v, (c), (t))
/**
* resilient_queued_spin_lock_slowpath - acquire the queued spinlock
@@ -418,7 +410,8 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
*/
if (val & _Q_LOCKED_MASK) {
RES_RESET_TIMEOUT(ts, RES_DEF_TIMEOUT);
- res_smp_cond_load_acquire(&lock->locked, !VAL || RES_CHECK_TIMEOUT(ts, ret, _Q_LOCKED_MASK));
+ res_smp_cond_load_acquire_timewait(&lock->locked, !VAL,
+ RES_CHECK_TIMEOUT(ts, ret, _Q_LOCKED_MASK));
}
if (ret) {
@@ -572,7 +565,7 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
* us.
*/
RES_RESET_TIMEOUT(ts, RES_DEF_TIMEOUT * 2);
- val = res_atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK) ||
+ val = res_atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK),
RES_CHECK_TIMEOUT(ts, ret, _Q_LOCKED_PENDING_MASK));
waitq_timeout:
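
Taken together, the slowpath's calling convention condenses to the
following sketch (names from the patch; the error label is
illustrative):

	RES_INIT_TIMEOUT(ts);			/* ts.wait from the arch hook */
	RES_RESET_TIMEOUT(ts, RES_DEF_TIMEOUT);	/* arm the deadline */

	/* Returns once !VAL holds or RES_CHECK_TIMEOUT() evaluates true. */
	res_smp_cond_load_acquire_timewait(&lock->locked, !VAL,
					   RES_CHECK_TIMEOUT(ts, ret, _Q_LOCKED_MASK));
	if (ret)	/* set by check_timeout() on expiry or deadlock */
		goto err;	/* illustrative */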
--
2.31.1