Message-ID: <1317313134.4588.26.camel@gandalf.stny.rr.com>
Date:	Thu, 29 Sep 2011 12:18:53 -0400
From:	Steven Rostedt <rostedt@...dmis.org>
To:	LKML <linux-kernel@...r.kernel.org>
Cc:	Thomas Gleixner <tglx@...utronix.de>,
	Clark Williams <clark@...hat.com>,
	Peter Zijlstra <a.p.zijlstra@...llo.nl>
Subject: [PATCH][RT][RFC] rt: show owner of locks for blocked tasks in task
 dumps

I have found this quite helpful in my recent debugging. When doing task
dumps, show the owner of the lock that a task is blocked on. This is
especially helpful when lockdep is not enabled and we have no other record
of which task holds which lock; the rtmutex itself still knows which task
owns the lock that a blocked task is waiting on.

Because task dumps can happen almost anywhere, the raw spinlocks involved
are only taken with trylock, so the dump itself cannot deadlock; if a lock
cannot be taken, the code says so (or tolerates a stale owner) and moves on.
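
For illustration only (the lock address, task name and PID below are made
up; the format is the one used by the printk in show_rt_lock_owner()), a
blocked task's dump would gain a line like:

  blocked on lock ffff880079e27c58 owned by rsyslogd:842

or, if the owner released the lock just before we looked:

  blocked on lock ffff880079e27c58 with no owner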

Signed-off-by: Steven Rostedt <rostedt@...dmis.org>

Index: linux-mrg.git/kernel/rtmutex.c
===================================================================
--- linux-mrg.git.orig/kernel/rtmutex.c
+++ linux-mrg.git/kernel/rtmutex.c
@@ -1431,3 +1431,51 @@ int rt_mutex_finish_proxy_lock(struct rt
 
 	return ret;
 }
+
+void show_rt_lock_owner(struct task_struct *p)
+{
+	struct rt_mutex_waiter *waiter;
+	struct rt_mutex *lock;
+	struct task_struct *task;
+	unsigned long flags;
+	int locked = 0;
+
+	/*
+	 * This is called from stack dumps, so we cannot safely
+	 * block on a lock. If the trylock fails, simply say so
+	 * and bail.
+	 */
+	if (!raw_spin_trylock_irqsave(&p->pi_lock, flags)) {
+		printk(" task pi_lock already held\n");
+		return;
+	}
+	waiter = p->pi_blocked_on;
+	if (!rt_mutex_real_waiter(waiter))
+		goto out;
+
+	/* Keep tasks from disappearing */
+	rcu_read_lock();
+
+	lock = waiter->lock;
+	/*
+	 * Try to synchronize with the lock's wait_lock, but don't
+	 * block (we already hold the task's pi_lock). If we fail to
+	 * take it, the worst that can happen is that the owner
+	 * changes under us.
+	 */
+	if (raw_spin_trylock(&lock->wait_lock))
+		locked = 1;
+
+	task = rt_mutex_owner(lock);
+	if (task)
+		printk(KERN_CONT "  blocked on lock %p owned by %s:%d\n",
+		       lock, task->comm, task->pid);
+	else
+		printk(KERN_CONT "  blocked on lock %p with no owner\n", lock);
+
+	if (locked)
+		raw_spin_unlock(&lock->wait_lock);
+	rcu_read_unlock();
+ out:
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+}
Index: linux-mrg.git/kernel/sched.c
===================================================================
--- linux-mrg.git.orig/kernel/sched.c
+++ linux-mrg.git/kernel/sched.c
@@ -6060,6 +6060,8 @@ void sched_show_task(struct task_struct 
 		(unsigned long)task_thread_info(p)->flags);
 
 	show_stack(p, NULL);
+
+	show_rt_lock_owner(p);
 }
 
 void show_state_filter(unsigned long state_filter)
Index: linux-mrg.git/include/linux/spinlock.h
===================================================================
--- linux-mrg.git.orig/include/linux/spinlock.h
+++ linux-mrg.git/include/linux/spinlock.h
@@ -273,6 +273,8 @@ static inline void do_raw_spin_unlock(ra
 # include <linux/spinlock_rt.h>
 #else /* PREEMPT_RT_FULL */
 
+static inline void show_rt_lock_owner(struct task_struct *p) { }
+
 /*
  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
  */
Index: linux-mrg.git/include/linux/spinlock_rt.h
===================================================================
--- linux-mrg.git.orig/include/linux/spinlock_rt.h
+++ linux-mrg.git/include/linux/spinlock_rt.h
@@ -26,6 +26,8 @@ extern int __lockfunc rt_spin_trylock_bh
 extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
 extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
 
+extern void show_rt_lock_owner(struct task_struct *p);
+
 /*
  * lockdep-less calls, for derived types like rwlock:
  * (for trylock they can use rt_mutex_trylock() directly.
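
For reference (not part of the patch): the easiest way to see the new output
is a sysrq task dump, e.g. echoing 't' into /proc/sysrq-trigger, which walks
every task through sched_show_task() and now prints the owner line right
after each task's stack trace.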


