Message-ID: <20250731061856.1858898-1-nysal@linux.ibm.com>
Date: Thu, 31 Jul 2025 11:48:53 +0530
From: "Nysal Jan K.A." <nysal@...ux.ibm.com>
To: Madhavan Srinivasan <maddy@...ux.ibm.com>,
Christophe Leroy <christophe.leroy@...roup.eu>,
Nicholas Piggin <npiggin@...il.com>
Cc: "Nysal Jan K.A." <nysal@...ux.ibm.com>,
Michael Ellerman <mpe@...erman.id.au>, linuxppc-dev@...ts.ozlabs.org,
linux-kernel@...r.kernel.org
Subject: [PATCH v2] powerpc/qspinlock: Add spinlock contention tracepoint

Add the lock contention tracepoints (contention_begin/contention_end)
to the queued spinlock slowpath.
Also add the __lockfunc annotation so that in_lock_functions()
works as expected.

Signed-off-by: Nysal Jan K.A. <nysal@...ux.ibm.com>
---
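A note for reviewers, not intended for the commit message: __lockfunc
matters here because it places the annotated function in the
.spinlock.text section, which is exactly the range that
in_lock_functions() tests against. A minimal sketch of that mechanism
(the real definitions live in include/linux/spinlock.h and
kernel/locking/spinlock.c; the code below is simplified from memory):

/* __lockfunc puts the annotated function into .spinlock.text */
#define __lockfunc __section(".spinlock.text")

/* Section bounds provided by the kernel linker script */
extern char __lock_text_start[], __lock_text_end[];

/*
 * Returns true only for addresses inside __lockfunc functions, so
 * profilers and backtrace code can recognise and skip locking
 * internals.
 */
int in_lock_functions(unsigned long addr)
{
	return addr >= (unsigned long)__lock_text_start &&
	       addr <  (unsigned long)__lock_text_end;
}

Without the annotation, the powerpc slowpath would fall outside that
range check.
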
arch/powerpc/lib/qspinlock.c | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)
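
Once applied, the new events appear in the existing "lock" trace event
group and can be enabled from tracefs, for example (assuming tracefs is
mounted at /sys/kernel/tracing):

  echo 1 > /sys/kernel/tracing/events/lock/contention_begin/enable
  echo 1 > /sys/kernel/tracing/events/lock/contention_end/enable
  cat /sys/kernel/tracing/trace_pipe

Tools that consume these events, such as perf lock contention, should
then also work on powerpc.
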
diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index bcc7e4dff8c3..95ab4cdf582e 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -9,6 +9,7 @@
#include <linux/sched/clock.h>
#include <asm/qspinlock.h>
#include <asm/paravirt.h>
+#include <trace/events/lock.h>
#define MAX_NODES 4
@@ -708,26 +709,26 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
qnodesp->count--;
}
-void queued_spin_lock_slowpath(struct qspinlock *lock)
+void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock)
{
+ trace_contention_begin(lock, LCB_F_SPIN);
/*
* This looks funny, but it induces the compiler to inline both
* sides of the branch rather than share code as when the condition
* is passed as the paravirt argument to the functions.
*/
if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) && is_shared_processor()) {
- if (try_to_steal_lock(lock, true)) {
+ if (try_to_steal_lock(lock, true))
spec_barrier();
- return;
- }
- queued_spin_lock_mcs_queue(lock, true);
+ else
+ queued_spin_lock_mcs_queue(lock, true);
} else {
- if (try_to_steal_lock(lock, false)) {
+ if (try_to_steal_lock(lock, false))
spec_barrier();
- return;
- }
- queued_spin_lock_mcs_queue(lock, false);
+ else
+ queued_spin_lock_mcs_queue(lock, false);
}
+ trace_contention_end(lock, 0);
}
EXPORT_SYMBOL(queued_spin_lock_slowpath);
--
2.47.0