Message-ID: <ed1d5f9ec17a5b8d758c234562dad47cfc872ed8.camel@gmx.de>
Date:   Sun, 01 Aug 2021 17:14:49 +0200
From:   Mike Galbraith <efault@....de>
To:     Thomas Gleixner <tglx@...utronix.de>,
        Sebastian Andrzej Siewior <bigeasy@...utronix.de>
Cc:     LKML <linux-kernel@...r.kernel.org>,
        linux-rt-users@...r.kernel.org,
        Steven Rostedt <rostedt@...dmis.org>,
        Peter Zijlstra <peterz@...radead.org>
Subject: Re: v5.14-rc3-rt1 losing wakeups?

On Sun, 2021-08-01 at 05:36 +0200, Mike Galbraith wrote:
> On Fri, 2021-07-30 at 22:49 +0200, Thomas Gleixner wrote:
> > >
> > > First symptom is KDE/Plasma's task manager going comatose.  Notice soon
> >
> > KDE/Plasma points at the new fangled rtmutex based ww_mutex from
> > Peter.
>
> Seems not.  When booting KVM box with nomodeset, there's exactly one
> early boot ww_mutex lock/unlock, ancient history at the failure point.

As you've probably already surmised given that it isn't the ww_mutex bits,
it's the wake_q bits.  Apply the below, and 5.14-rt ceases to fail.  Take
a perfectly healthy 5.13-rt, apply those bits, and it instantly begins
failing just as 5.14-rt does.
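
My guess at the mechanism, for what it's worth (speculation on my part,
but it fits the symptom): wake_q gives each task exactly one wake_q_node,
so a task that is already pending on one wake queue cannot be queued on a
second one; the add is silently dropped.  Paraphrased from memory from
kernel/sched/core.c:

static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Claim the node.  If it is already pending on *any* head,
	 * the cmpxchg fails and this add simply never happens.
	 */
	smp_mb__before_atomic();
	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
		return false;

	/* The head is context local, no concurrency. */
	*head->lastp = node;
	head->lastp = &node->next;
	return true;
}

Dropping the add is harmless for regular sleepers, since whoever queued
the task first will wake it with TASK_NORMAL anyway.  But a waiter
sleeping in TASK_RTLOCK_WAIT is not woken by a TASK_NORMAL wakeup, so if
its rt_head add is the one that gets dropped, that wakeup is lost for
good.  The below goes back to a bare task pointer for the (at most one)
rtlock top waiter, woken directly via wake_up_state(TASK_RTLOCK_WAIT),
and folds __wake_up_q() back into wake_up_q().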

---
 include/linux/sched/wake_q.h    |    7 +------
 kernel/futex.c                  |    4 ++--
 kernel/locking/rtmutex.c        |   18 +++++++++++-------
 kernel/locking/rtmutex_api.c    |    6 +++---
 kernel/locking/rtmutex_common.h |   22 +++++++++++-----------
 kernel/sched/core.c             |    4 ++--
 6 files changed, 30 insertions(+), 31 deletions(-)

--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
@@ -61,11 +61,6 @@ static inline bool wake_q_empty(struct w

 extern void wake_q_add(struct wake_q_head *head, struct task_struct *task);
 extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task);
-extern void __wake_up_q(struct wake_q_head *head, unsigned int state);
-
-static inline void wake_up_q(struct wake_q_head *head)
-{
-	__wake_up_q(head, TASK_NORMAL);
-}
+extern void wake_up_q(struct wake_q_head *head);

 #endif /* _LINUX_SCHED_WAKE_Q_H */
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1499,11 +1499,11 @@ static void mark_wake_futex(struct wake_
  */
 static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
 {
+	DEFINE_RT_MUTEX_WAKE_Q_HEAD(wqh);
+	u32 curval, newval;
 	struct rt_mutex_waiter *top_waiter;
 	struct task_struct *new_owner;
 	bool postunlock = false;
-	DEFINE_RT_WAKE_Q(wqh);
-	u32 curval, newval;
 	int ret = 0;

 	top_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex);
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -425,20 +425,24 @@ static __always_inline void rt_mutex_adj
 }

 /* RT mutex specific wake_q wrappers */
-static __always_inline void rt_mutex_wake_q_add(struct rt_wake_q_head *wqh,
+static __always_inline void rt_mutex_wake_q_add(struct rt_mutex_wake_q_head *wqh,
 						struct rt_mutex_waiter *w)
 {
 	if (IS_ENABLED(CONFIG_PREEMPT_RT) && w->wake_state != TASK_NORMAL) {
-		wake_q_add(&wqh->rt_head, w->task);
+		get_task_struct(w->task);
+		wqh->rtlock_task = w->task;
 	} else {
 		wake_q_add(&wqh->head, w->task);
 	}
 }

-static __always_inline void rt_mutex_wake_up_q(struct rt_wake_q_head *wqh)
+static __always_inline void rt_mutex_wake_up_q(struct rt_mutex_wake_q_head *wqh)
 {
-	if (IS_ENABLED(CONFIG_PREEMPT_RT) && !wake_q_empty(&wqh->rt_head))
-		__wake_up_q(&wqh->rt_head, TASK_RTLOCK_WAIT);
+	if (IS_ENABLED(CONFIG_PREEMPT_RT) && wqh->rtlock_task) {
+		wake_up_state(wqh->rtlock_task, TASK_RTLOCK_WAIT);
+		put_task_struct(wqh->rtlock_task);
+		wqh->rtlock_task = NULL;
+	}

 	if (!wake_q_empty(&wqh->head))
 		wake_up_q(&wqh->head);
@@ -1111,7 +1115,7 @@ static int __sched task_blocks_on_rt_mut
  *
  * Called with lock->wait_lock held and interrupts disabled.
  */
-static void __sched mark_wakeup_next_waiter(struct rt_wake_q_head *wqh,
+static void __sched mark_wakeup_next_waiter(struct rt_mutex_wake_q_head *wqh,
 					    struct rt_mutex_base *lock)
 {
 	struct rt_mutex_waiter *waiter;
@@ -1210,7 +1214,7 @@ static __always_inline int __rt_mutex_tr
  */
 static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock)
 {
-	DEFINE_RT_WAKE_Q(wqh);
+	DEFINE_RT_MUTEX_WAKE_Q_HEAD(wqh);
 	unsigned long flags;

 	/* irqsave required to support early boot calls */
--- a/kernel/locking/rtmutex_api.c
+++ b/kernel/locking/rtmutex_api.c
@@ -141,7 +141,7 @@ int __sched __rt_mutex_futex_trylock(str
  * @wqh:	The wake queue head from which to get the next lock waiter
  */
 bool __sched __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
-				     struct rt_wake_q_head *wqh)
+				     struct rt_mutex_wake_q_head *wqh)
 {
 	lockdep_assert_held(&lock->wait_lock);

@@ -165,7 +165,7 @@ bool __sched __rt_mutex_futex_unlock(str

 void __sched rt_mutex_futex_unlock(struct rt_mutex_base *lock)
 {
-	DEFINE_RT_WAKE_Q(wqh);
+	DEFINE_RT_MUTEX_WAKE_Q_HEAD(wqh);
 	unsigned long flags;
 	bool postunlock;

@@ -454,7 +454,7 @@ void __sched rt_mutex_adjust_pi(struct t
 /*
  * Performs the wakeup of the top-waiter and re-enables preemption.
  */
-void __sched rt_mutex_postunlock(struct rt_wake_q_head *wqh)
+void __sched rt_mutex_postunlock(struct rt_mutex_wake_q_head *wqh)
 {
 	rt_mutex_wake_up_q(wqh);
 }
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -42,20 +42,20 @@ struct rt_mutex_waiter {
 };

 /**
- * rt_wake_q_head - Wrapper around regular wake_q_head to support
- *		    "sleeping" spinlocks on RT
- * @head:	The regular wake_q_head for sleeping lock variants
- * @rt_head:	The wake_q_head for RT lock (spin/rwlock) variants
+ * rt_mutex_wake_q_head - Wrapper around regular wake_q_head to support
+ *			  "sleeping" spinlocks on RT
+ * @head:		The regular wake_q_head for sleeping lock variants
+ * @rtlock_task:	Task pointer for RT lock (spin/rwlock) wakeups
  */
-struct rt_wake_q_head {
+struct rt_mutex_wake_q_head {
 	struct wake_q_head	head;
-	struct wake_q_head	rt_head;
+	struct task_struct	*rtlock_task;
 };

-#define DEFINE_RT_WAKE_Q(name)						\
-	struct rt_wake_q_head name = {					\
+#define DEFINE_RT_MUTEX_WAKE_Q_HEAD(name)				\
+	struct rt_mutex_wake_q_head name = {				\
 		.head		= WAKE_Q_HEAD_INITIALIZER(name.head),	\
-		.rt_head	= WAKE_Q_HEAD_INITIALIZER(name.rt_head),\
+		.rtlock_task	= NULL,					\
 	}

 /*
@@ -81,9 +81,9 @@ extern int __rt_mutex_futex_trylock(stru

 extern void rt_mutex_futex_unlock(struct rt_mutex_base *lock);
 extern bool __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
-				struct rt_wake_q_head *wqh);
+				struct rt_mutex_wake_q_head *wqh);

-extern void rt_mutex_postunlock(struct rt_wake_q_head *wqh);
+extern void rt_mutex_postunlock(struct rt_mutex_wake_q_head *wqh);

 /*
  * Must be guarded because this header is included from rcu/tree_plugin.h
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -920,7 +920,7 @@ void wake_q_add_safe(struct wake_q_head
 		put_task_struct(task);
 }

-void __wake_up_q(struct wake_q_head *head, unsigned int state)
+void wake_up_q(struct wake_q_head *head)
 {
 	struct wake_q_node *node = head->first;

@@ -936,7 +936,7 @@ void __wake_up_q(struct wake_q_head *hea
 		 * wake_up_process() executes a full barrier, which pairs with
 		 * the queueing in wake_q_add() so as not to miss wakeups.
 		 */
-		wake_up_state(task, state);
+		wake_up_process(task);
 		put_task_struct(task);
 	}
 }
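
For completeness, the unlock-side shape with the above applied, as a
sketch only (the real rt_mutex_slowunlock() also handles the no-waiter
fast path and retries; the _sketch name is mine):

static void rt_mutex_slowunlock_sketch(struct rt_mutex_base *lock)
{
	DEFINE_RT_MUTEX_WAKE_Q_HEAD(wqh);
	unsigned long flags;

	/* irqsave required to support early boot calls */
	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	/*
	 * Queue the top waiter: regular sleepers land on wqh.head,
	 * an rtlock waiter in wqh.rtlock_task (with a reference held).
	 */
	mark_wakeup_next_waiter(&wqh, lock);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	/*
	 * Wake the rtlock waiter via wake_up_state(TASK_RTLOCK_WAIT),
	 * everything else via plain wake_up_q().
	 */
	rt_mutex_wake_up_q(&wqh);
}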

