Message-ID: <1355975042.5896.54.camel@gandalf.local.home>
Date:	Wed, 19 Dec 2012 22:44:02 -0500
From:	Steven Rostedt <rostedt@...dmis.org>
To:	LKML <linux-kernel@...r.kernel.org>
Cc:	RT <linux-rt-users@...r.kernel.org>,
	Thomas Gleixner <tglx@...utronix.de>,
	Carsten Emde <cbe@...dl.org>,
	Clark Williams <clark@...hat.com>,
	John Kacur <jkacur@...hat.com>,
	Mike Galbraith <bitbucket@...ine.de>
Subject: Re: [RFC][PATCH RT] rtmutex: Use raw_spin_trylock() in
 rt_mutex_slowlock() to ease possible live locks

On Wed, 2012-12-19 at 20:31 -0500, Steven Rostedt wrote:

> 
> This is just an RFC patch to start discussion, not for inclusion. I may
> send another patch that implements #2 above.


Signed-off-by: Steven Rostedt <rostedt@...dmis.org>

Index: linux-rt.git/kernel/rtmutex.c
===================================================================
--- linux-rt.git.orig/kernel/rtmutex.c
+++ linux-rt.git/kernel/rtmutex.c
@@ -167,6 +167,39 @@ static void rt_mutex_wake_waiter(struct 
  */
 int max_lock_depth = 1024;
 
+static bool test_lock_waiter(struct task_struct *task,
+			     struct rt_mutex_waiter *waiter,
+			     struct rt_mutex_waiter *top_waiter,
+			     struct rt_mutex *orig_lock,
+			     struct rt_mutex_waiter *orig_waiter,
+			     int detect_deadlock)
+{
+	/*
+	 * Check the orig_waiter state. After we dropped the locks,
+	 * the previous owner of the lock might have released the lock.
+	 */
+	if (orig_waiter && !rt_mutex_owner(orig_lock))
+		return false;
+
+	/*
+	 * Drop out, when the task has no waiters. Note,
+	 * top_waiter can be NULL, when we are in the deboosting
+	 * mode!
+	 */
+	if (top_waiter && (!task_has_pi_waiters(task) ||
+			   top_waiter != task_top_pi_waiter(task)))
+		return false;
+
+	/*
+	 * When deadlock detection is off then we check, if further
+	 * priority adjustment is necessary.
+	 */
+	if (!detect_deadlock && waiter->list_entry.prio == task->prio)
+		return false;
+
+	return true;
+}
+
 /*
  * Adjust the priority chain. Also used for deadlock detection.
  * Decreases task's usage by one - may thus free the task.
@@ -225,34 +258,32 @@ static int rt_mutex_adjust_prio_chain(st
 	if (!rt_mutex_real_waiter(waiter))
 		goto out_unlock_pi;
 
-	/*
-	 * Check the orig_waiter state. After we dropped the locks,
-	 * the previous owner of the lock might have released the lock.
-	 */
-	if (orig_waiter && !rt_mutex_owner(orig_lock))
-		goto out_unlock_pi;
-
-	/*
-	 * Drop out, when the task has no waiters. Note,
-	 * top_waiter can be NULL, when we are in the deboosting
-	 * mode!
-	 */
-	if (top_waiter && (!task_has_pi_waiters(task) ||
-			   top_waiter != task_top_pi_waiter(task)))
-		goto out_unlock_pi;
-
-	/*
-	 * When deadlock detection is off then we check, if further
-	 * priority adjustment is necessary.
-	 */
-	if (!detect_deadlock && waiter->list_entry.prio == task->prio)
+	if (!test_lock_waiter(task, waiter, top_waiter,
+			      orig_lock, orig_waiter, detect_deadlock))
 		goto out_unlock_pi;
 
 	lock = waiter->lock;
 	if (!raw_spin_trylock(&lock->wait_lock)) {
 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-		cpu_relax();
-		goto retry;
+		/*
+		 * As raw_spin_locks are FIFO, we need to avoid being
+		 * starved out by other tasks that may be grabbing
+		 * the wait_lock. Grab both locks in the proper
+		 * order and test if anything changed. If it did
+		 * we need to drop them and try again.
+		 */
+		raw_spin_lock(&lock->wait_lock);
+		raw_spin_lock_irqsave(&task->pi_lock, flags);
+		if (waiter != task->pi_blocked_on ||
+		    lock != waiter->lock ||
+		    !test_lock_waiter(task, waiter, top_waiter,
+				      orig_lock, orig_waiter,
+				      detect_deadlock)) {
+			raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+			raw_spin_unlock(&lock->wait_lock);
+			cpu_relax();
+			goto retry;
+		}
 	}
 
 	/* Deadlock detection */

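For anyone who wants to poke at the retry logic outside the kernel, here is a
minimal userspace sketch of the same pattern using POSIX spinlocks: try the
reverse-order trylock first, and if that fails take both locks in the proper
order and recheck the state that was sampled before the locks were dropped.
The names below (struct chain_state, blocked_on, lock_both_if_unchanged) are
made up for the example and are not rtmutex.c APIs.

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

struct chain_state {
	pthread_spinlock_t pi_lock;	/* stands in for task->pi_lock   */
	pthread_spinlock_t wait_lock;	/* stands in for lock->wait_lock */
	void *blocked_on;		/* state that must be rechecked  */
};

/*
 * Take pi_lock and wait_lock. The fast path holds pi_lock and only
 * trylocks wait_lock (reverse order), so no revalidation is needed.
 * If the trylock fails, drop pi_lock, take both locks in the proper
 * order and recheck the state observed earlier. Returns false when
 * the state changed, with both locks released; the caller restarts.
 */
static bool lock_both_if_unchanged(struct chain_state *s, void *expected)
{
	pthread_spin_lock(&s->pi_lock);

	if (pthread_spin_trylock(&s->wait_lock) == 0)
		return true;			/* fast path */

	pthread_spin_unlock(&s->pi_lock);
	pthread_spin_lock(&s->wait_lock);	/* proper order: wait_lock... */
	pthread_spin_lock(&s->pi_lock);		/* ... then pi_lock */

	if (s->blocked_on == expected)
		return true;			/* nothing changed, keep both */

	pthread_spin_unlock(&s->pi_lock);
	pthread_spin_unlock(&s->wait_lock);
	sched_yield();				/* analogue of cpu_relax() */
	return false;				/* caller does the "goto retry" */
}

int main(void)
{
	struct chain_state s = { .blocked_on = NULL };

	pthread_spin_init(&s.pi_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&s.wait_lock, PTHREAD_PROCESS_PRIVATE);

	while (!lock_both_if_unchanged(&s, NULL))
		;
	printf("both locks held, state unchanged\n");

	pthread_spin_unlock(&s.pi_lock);
	pthread_spin_unlock(&s.wait_lock);
	return 0;
}

The point of the fallback is fairness: since the raw spinlocks are FIFO, a
bare trylock/cpu_relax() loop can be starved by other CPUs queuing on
wait_lock, whereas the blocking acquisition in the proper order guarantees
forward progress at the cost of having to recheck the chain state.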

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/
