We should deboost before waking the high-priority task, such that we
don't run two tasks with the same "state" (priority, deadline,
sched_class, etc.).

In order to make sure the boosting task doesn't start running between
unlock and deboost (due to a 'spurious' wakeup), we move the deboost
under the wait_lock; that way it is serialized against the wait loop
in __rt_mutex_slowlock().

Doing the deboost early can however lead to priority inversion if
current gets preempted after the deboost but before waking our
high-prio task, hence we disable preemption before doing the deboost
and enable it again once the wakeup is done.

This gets us the right semantic order, but most importantly it ensures
pointer stability for the next patch, where we have rt_mutex_setprio()
cache a pointer to the top-most waiter task. If we, as before this
change, did the wakeup first and then the deboost, this pointer might
point into thin air.

[peterz: Changelog + patch munging]
Cc: Ingo Molnar
Cc: Juri Lelli
Acked-by: Steven Rostedt
Suggested-by: Peter Zijlstra
Signed-off-by: Xunlei Pang
Signed-off-by: Peter Zijlstra (Intel)
---
 kernel/futex.c                  |    5 +---
 kernel/locking/rtmutex.c        |   48 ++++++++++++++++++++--------------------
 kernel/locking/rtmutex_common.h |    1 
 3 files changed, 28 insertions(+), 26 deletions(-)

--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1374,9 +1374,8 @@ static int wake_futex_pi(u32 __user *uad
 	 * scheduled away before the wake up can take place.
 	 */
 	spin_unlock(&hb->lock);
-	wake_up_q(&wake_q);
-	if (deboost)
-		rt_mutex_adjust_prio(current);
+
+	rt_mutex_postunlock(&wake_q, deboost);
 
 	return 0;
 }
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -307,24 +307,6 @@ static void __rt_mutex_adjust_prio(struc
 }
 
 /*
- * Adjust task priority (undo boosting). Called from the exit path of
- * rt_mutex_slowunlock() and rt_mutex_slowlock().
- *
- * (Note: We do this outside of the protection of lock->wait_lock to
- * allow the lock to be taken while or before we readjust the priority
- * of task. We do not use the spin_xx_mutex() variants here as we are
- * outside of the debug path.)
- */
-void rt_mutex_adjust_prio(struct task_struct *task)
-{
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&task->pi_lock, flags);
-	__rt_mutex_adjust_prio(task);
-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-}
-
-/*
  * Deadlock detection is conditional:
  *
  * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
@@ -987,6 +969,7 @@ static void mark_wakeup_next_waiter(stru
 	 * lock->wait_lock.
 	 */
 	rt_mutex_dequeue_pi(current, waiter);
+	__rt_mutex_adjust_prio(current);
 
 	/*
 	 * As we are waking up the top waiter, and the waiter stays
@@ -1325,6 +1308,16 @@ static bool __sched rt_mutex_slowunlock(
 	 */
 	mark_wakeup_next_waiter(wake_q, lock);
 
+	/*
+	 * We should deboost before waking the top waiter task such that
+	 * we don't run two tasks with the 'same' priority. This however
+	 * can lead to prio-inversion if we would get preempted after
+	 * the deboost but before waking our high-prio task, hence the
+	 * preempt_disable before unlock. Pairs with preempt_enable() in
+	 * rt_mutex_postunlock();
+	 */
+	preempt_disable();
+
 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 	/* check PI boosting */
@@ -1390,14 +1383,23 @@ rt_mutex_fastunlock(struct rt_mutex *loc
 	} else {
 		bool deboost = slowfn(lock, &wake_q);
 
-		wake_up_q(&wake_q);
-
-		/* Undo pi boosting if necessary: */
-		if (deboost)
-			rt_mutex_adjust_prio(current);
+		rt_mutex_postunlock(&wake_q, deboost);
 	}
 }
 
+/*
+ * Undo pi boosting (if necessary) and wake top waiter.
+ */
+void rt_mutex_postunlock(struct wake_q_head *wake_q, bool deboost)
+{
+	wake_up_q(wake_q);
+
+	/* Pairs with preempt_disable() in rt_mutex_slowunlock() */
+	if (deboost)
+		preempt_enable();
+}
+
 /**
  * rt_mutex_lock - lock a rt_mutex
  *
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -111,6 +111,7 @@ extern int rt_mutex_finish_proxy_lock(st
 extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);
 extern bool rt_mutex_futex_unlock(struct rt_mutex *lock,
 				  struct wake_q_head *wqh);
+extern void rt_mutex_postunlock(struct wake_q_head *wake_q, bool deboost);
 extern void rt_mutex_adjust_prio(struct task_struct *task);
 
 #ifdef CONFIG_DEBUG_RT_MUTEXES
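
Not part of the patch, for review convenience only: a condensed sketch of
the slow unlock ordering this change establishes, paraphrased from the
rt_mutex_slowunlock() and rt_mutex_postunlock() hunks above. Declarations,
the no-waiter path and error handling are omitted, so treat it as an
illustration rather than compilable code.

	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	/*
	 * mark_wakeup_next_waiter() now also calls
	 * __rt_mutex_adjust_prio(current): the deboost happens under
	 * wait_lock and is thus serialized against the wait loop in
	 * __rt_mutex_slowlock(), so the boosting task cannot start
	 * running between unlock and deboost after a spurious wakeup.
	 */
	mark_wakeup_next_waiter(wake_q, lock);

	/* No preemption window between the deboost and the wakeup. */
	preempt_disable();
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	/* Later, from rt_mutex_postunlock(): */
	wake_up_q(wake_q);
	if (deboost)
		preempt_enable();	/* pairs with the preempt_disable() above */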