The adaptive spin patches introduced an overdesigned optimization for the
adaptive path. The avoidance of those code paths is not worth the extra
conditionals, and it is inconsistent in itself. Remove it and use the same
mechanism as the other lock paths. That way we have a consistent state
manipulation scheme and fewer special cases.

Signed-off-by: Thomas Gleixner
---
 kernel/rtmutex.c | 18 +++---------------
 1 file changed, 3 insertions(+), 15 deletions(-)

Index: linux-2.6.24/kernel/rtmutex.c
===================================================================
--- linux-2.6.24.orig/kernel/rtmutex.c
+++ linux-2.6.24/kernel/rtmutex.c
@@ -848,7 +848,6 @@ rt_spin_lock_slowlock(struct rt_mutex *l
 	struct rt_mutex_waiter waiter;
 	unsigned long saved_state, flags;
 	struct task_struct *orig_owner;
-	int missed = 0;
 
 	debug_rt_mutex_init_waiter(&waiter);
 	waiter.task = NULL;
@@ -870,20 +869,15 @@ rt_spin_lock_slowlock(struct rt_mutex *l
 	 * about original state TASK_INTERRUPTIBLE as well, as we
	 * could miss a wakeup_interruptible()
 	 */
-	saved_state = current->state;
+	saved_state = rt_set_current_blocked_state(current->state);
 
 	for (;;) {
 		unsigned long saved_flags;
 		int saved_lock_depth = current->lock_depth;
 
 		/* Try to acquire the lock */
-		if (do_try_to_take_rt_mutex(lock, STEAL_LATERAL)) {
-			/* If we never blocked break out now */
-			if (!missed)
-				goto unlock;
+		if (do_try_to_take_rt_mutex(lock, STEAL_LATERAL))
 			break;
-		}
-		missed = 1;
 
 		/*
 		 * waiter.task is NULL the first time we come here and
@@ -913,12 +907,6 @@ rt_spin_lock_slowlock(struct rt_mutex *l
 		if (adaptive_wait(&waiter, orig_owner)) {
 			put_task_struct(orig_owner);
 
-			saved_state = rt_set_current_blocked_state(saved_state);
-			/*
-			 * The xchg() in update_current() is an implicit
-			 * barrier which we rely upon to ensure current->state
-			 * is visible before we test waiter.task.
-			 */
 			if (waiter.task)
 				schedule_rt_mutex(lock);
 		} else
@@ -927,6 +915,7 @@ rt_spin_lock_slowlock(struct rt_mutex *l
 		spin_lock_irqsave(&lock->wait_lock, flags);
 		current->flags |= saved_flags;
 		current->lock_depth = saved_lock_depth;
+		saved_state = rt_set_current_blocked_state(saved_state);
 	}
 
 	rt_restore_current_state(saved_state);
@@ -945,7 +934,6 @@ rt_spin_lock_slowlock(struct rt_mutex *l
 	 */
 	fixup_rt_mutex_waiters(lock);
 
- unlock:
 	spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 	debug_rt_mutex_free_waiter(&waiter);
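
For reference, below is a minimal sketch of the slow-path loop after this
change. It is not the verbatim kernel code: only the helpers visible in the
diff above are used, the function name is made up, and everything not shown
in the diff (waiter enqueueing, the owner reference, BKL/lock_depth
handling) is elided into comments.

static void sketch_rt_spin_lock_slowlock(struct rt_mutex *lock)
{
	struct rt_mutex_waiter waiter;
	struct task_struct *orig_owner;	/* set up while enqueueing (elided) */
	unsigned long saved_state, flags;

	debug_rt_mutex_init_waiter(&waiter);
	waiter.task = NULL;

	spin_lock_irqsave(&lock->wait_lock, flags);

	/*
	 * Go into the blocked state right away, exactly like the other
	 * lock paths, instead of hiding it behind a "missed" flag.
	 */
	saved_state = rt_set_current_blocked_state(current->state);

	for (;;) {
		/* Try to acquire the lock - no early-exit special case */
		if (do_try_to_take_rt_mutex(lock, STEAL_LATERAL))
			break;

		/* ... enqueue the waiter, take a reference on the owner,
		   drop lock->wait_lock ... */

		if (adaptive_wait(&waiter, orig_owner)) {
			/* Owner is not running: really go to sleep */
			if (waiter.task)
				schedule_rt_mutex(lock);
		}

		/* ... drop the owner reference, retake lock->wait_lock ... */

		/*
		 * The blocked state is re-established in one well-defined
		 * place per iteration, instead of only inside the
		 * adaptive_wait() branch.
		 */
		saved_state = rt_set_current_blocked_state(saved_state);
	}

	rt_restore_current_state(saved_state);
	fixup_rt_mutex_waiters(lock);

	/* Single unlock path - the "unlock:" label is gone */
	spin_unlock_irqrestore(&lock->wait_lock, flags);

	debug_rt_mutex_free_waiter(&waiter);
}

The point is simply that the blocked-state manipulation now happens at the
same spots as in the other slowlock paths, so there is one scheme to reason
about rather than an extra fast-exit variant.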