Message-Id: <20220619142038.1274-1-mintupatel89@gmail.com>
Date: Sun, 19 Jun 2022 19:50:38 +0530
From: Mintu Patel <mintupatel89@...il.com>
To: mintupatel89@...il.com
Cc: Chinmoy Ghosh <chinmoyghosh2001@...il.com>,
Vishal Badole <badolevishal1116@...il.com>,
Vimal Kumar <vimal.kumar32@...il.com>,
Steven Rostedt <rostedt@...dmis.org>,
Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Will Deacon <will@...nel.org>, linux-kernel@...r.kernel.org
Subject: [PATCH] rt_spin_lock: Trace the correct owner of rt_spin_lock

rt_spin_lock is actually a mutex on an RT kernel, so tasks can contend
for it and block. Currently the owner of an rt_spin_lock is decided
before the lock is actually acquired. This patch reports the correct
owner of an rt_spin_lock, which helps in debugging crashes and
deadlocks caused by races around lock acquisition.

  acquiring rt_spin_lock       acquired the lock        released the lock
            |<--------------------->|<---------------------->|
                contention period          held period
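
For illustration, a minimal sketch of the driver side of this picture
(driverA_lock and driverA_do_work() are made-up names; under PREEMPT_RT
the spinlock below is really an rt_mutex):

	static DEFINE_SPINLOCK(driverA_lock);

	static void driverA_acquire_lock(void)
	{
		spin_lock(&driverA_lock);   /* contention period ends here */
		driverA_do_work();          /* held period: keep this short */
		spin_unlock(&driverA_lock); /* held period ends here */
	}

The longer driverA_do_work() runs, the longer every waiter sits in the
contention period, as in the sample call traces below:
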
        Thread1                                Thread2
_try_to_take_rt_mutex+0x95c+0x74       enqueue_task_dl+0x8cc/0x8dc
rt_spin_lock_slowlock_locked+0xac+2    rt_mutex_setprio+0x28c/0x574
rt_spin_lock_slowlock+0x5c/0x90        task_blocks_rt_mutex+0x240/0x310
rt_spin_lock+0x58/0x5c                 rt_spin_lock_slowlock_locked+0xac/0x2
driverA_acquire_lock+0x28/0x56         rt_spin_lock_slowlock+0x5c/0x90
                                       rt_spin_lock+0x58/0x5c
                                       driverB_acquire_lock+0x48/0x6c

As the above sample call traces show, Thread1 acquired the rt_spin_lock
and entered its critical section, while Thread2 kept trying to acquire
the same rt_spin_lock held by Thread1, i.e. the contention period was
too high. Thread2 finally ended up in the dl queue (enqueue_task_dl)
because Thread1 held the lock for so long. This patch lets us identify
the correct owner of an rt_spin_lock and locate the driver's critical
section; the respective driver can then be debugged for holding the
lock too long.
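
Assuming the usual tracefs layout (the new tracepoints are defined in
include/trace/events/lock.h, i.e. the "lock" event group) and a kernel
built with CONFIG_RT_SPIN_LOCK_TRACING, the events can be enabled at
run time with:

	echo 1 > /sys/kernel/debug/tracing/events/lock/rt_spinlock_acquire/enable
	echo 1 > /sys/kernel/debug/tracing/events/lock/rt_spinlock_acquired/enable
	echo 1 > /sys/kernel/debug/tracing/events/lock/rt_spinlock_released/enable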

Example: cat /sys/kernel/debug/tracing/trace

kworker/u13:0-150 [003] .....11 202.761025: rt_spinlock_acquire: Process: kworker/u13:0 is acquiring lock: &kbdev->hwaccess_lock
kworker/u13:0-150 [003] .....11 202.761039: rt_spinlock_acquired: Process: kworker/u13:0 has acquired lock: &kbdev->hwaccess_lock
kworker/u13:0-150 [003] .....11 202.761042: rt_spinlock_released: Process: kworker/u13:0 has released lock: &kbdev->hwaccess_lock
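
In this sample the contention period is 202.761039 - 202.761025 = ~14us
and the held period is 202.761042 - 202.761039 = ~3us; a driver that
holds the lock for an unusually long time therefore shows up directly
in these timestamps.
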
Signed-off-by: Mintu Patel <mintupatel89@...il.com>
Signed-off-by: Chinmoy Ghosh <chinmoyghosh2001@...il.com>
Signed-off-by: Vishal Badole <badolevishal1116@...il.com>
Signed-off-by: Vimal Kumar <vimal.kumar32@...il.com>
---
 include/trace/events/lock.h | 59 +++++++++++++++++++++++++++++++++++++
 kernel/locking/rtmutex.c    | 10 +++++++
 2 files changed, 69 insertions(+)

diff --git a/include/trace/events/lock.h b/include/trace/events/lock.h
index d7512129a324..c250a83ed995 100644
--- a/include/trace/events/lock.h
+++ b/include/trace/events/lock.h
@@ -35,6 +35,65 @@ TRACE_EVENT(lock_acquire,
 		  (__entry->flags & 2) ? "read " : "",
 		  __get_str(name))
 );
 
+TRACE_EVENT(rt_spinlock_acquire,
+
+	TP_PROTO(struct lockdep_map *lock, struct task_struct *pname),
+
+	TP_ARGS(lock, pname),
+
+	TP_STRUCT__entry(
+		__string(name, lock->name)
+		__string(process_name, pname->comm)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, lock->name);
+		__assign_str(process_name, pname->comm);
+	),
+
+	TP_printk("Process: %s is acquiring lock: %s", __get_str(process_name),
+		  __get_str(name))
+);
+
+TRACE_EVENT(rt_spinlock_acquired,
+
+	TP_PROTO(struct lockdep_map *lock, struct task_struct *pname),
+
+	TP_ARGS(lock, pname),
+
+	TP_STRUCT__entry(
+		__string(name, lock->name)
+		__string(process_name, pname->comm)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, lock->name);
+		__assign_str(process_name, pname->comm);
+	),
+
+	TP_printk("Process: %s has acquired lock: %s", __get_str(process_name),
+		  __get_str(name))
+);
+
+TRACE_EVENT(rt_spinlock_released,
+
+	TP_PROTO(struct lockdep_map *lock, struct task_struct *pname),
+
+	TP_ARGS(lock, pname),
+
+	TP_STRUCT__entry(
+		__string(name, lock->name)
+		__string(process_name, pname->comm)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, lock->name);
+		__assign_str(process_name, pname->comm);
+	),
+
+	TP_printk("Process: %s has released lock: %s", __get_str(process_name),
+		  __get_str(name))
+);
 
 DECLARE_EVENT_CLASS(lock,
 
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 602eb7821a1b..f7cba05fbe74 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -26,6 +26,7 @@
 #include <linux/timer.h>
 #include <linux/ww_mutex.h>
 #include <linux/blkdev.h>
+#include <trace/events/lock.h>
 
 #include "rtmutex_common.h"
 
@@ -1144,7 +1145,13 @@ void __lockfunc rt_spin_lock(spinlock_t *lock)
 	rcu_read_lock();
 	migrate_disable();
 	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+#ifdef CONFIG_RT_SPIN_LOCK_TRACING
+	trace_rt_spinlock_acquire(&lock->dep_map, current);
+#endif
 	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+#ifdef CONFIG_RT_SPIN_LOCK_TRACING
+	trace_rt_spinlock_acquired(&lock->dep_map, current);
+#endif
 }
 EXPORT_SYMBOL(rt_spin_lock);
 
@@ -1169,6 +1176,9 @@ void __lockfunc rt_spin_unlock(spinlock_t *lock)
 {
 	/* NOTE: we always pass in '1' for nested, for simplicity */
 	spin_release(&lock->dep_map, 1, _RET_IP_);
+#ifdef CONFIG_RT_SPIN_LOCK_TRACING
+	trace_rt_spinlock_released(&lock->dep_map, current);
+#endif
 	rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
 	migrate_enable();
 	rcu_read_unlock();
--
2.25.1