Message-ID: <20250408122758.83495-1-ioworker0@gmail.com>
Date: Tue, 8 Apr 2025 20:27:57 +0800
From: Lance Yang <ioworker0@...il.com>
To: akpm@...ux-foundation.org
Cc: amaindex@...look.com,
anna.schumaker@...cle.com,
boqun.feng@...il.com,
ioworker0@...il.com,
joel.granados@...nel.org,
jstultz@...gle.com,
kent.overstreet@...ux.dev,
leonylgao@...cent.com,
linux-kernel@...r.kernel.org,
longman@...hat.com,
mhiramat@...nel.org,
mingo@...hat.com,
mingzhe.yang@...com,
peterz@...radead.org,
rostedt@...dmis.org,
senozhatsky@...omium.org,
tfiga@...omium.org,
will@...nel.org
Subject: Re: [PATCH v4 2/3] hung_task: show the blocker task if the task is hung on semaphore
Hi Andrew,
Thanks a lot for taking the time to review!
On Tue, Apr 8, 2025 at 4:08 AM Andrew Morton <akpm@...ux-foundation.org> wrote:
>
> On Thu, 20 Mar 2025 14:49:22 +0800 Lance Yang <ioworker0@...il.com> wrote:
>
> > Inspired by mutex blocker tracking[1], this patch makes a trade-off to
> > balance the overhead and utility of the hung task detector.
> >
> > Unlike mutexes, semaphores lack explicit ownership tracking, making it
> > challenging to identify the root cause of hangs. To address this, we
> > introduce a last_holder field to the semaphore structure, which is
> > updated when a task successfully calls down() and cleared during up().
> >
> > The assumption is that if a task is blocked on a semaphore, the holders
> > must not have released it. While this does not guarantee that the last
> > holder is one of the current blockers, it likely provides a practical hint
> > for diagnosing semaphore-related stalls.
> >
> > With this change, the hung task detector can now show blocker task's info
> > like below:
>
> +#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
> +#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
> +#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
> +#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
> +#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
> +#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
> +#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
> +#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
>
> It looks pretty simple to reduce the amount of ifdeffery which this
> patch adds.
Good catch! We can drop five of those #ifdef blocks with the following change ;)
diff --git a/kernel/locking/semaphore.c b/kernel/locking/semaphore.c
index 3d06d4adc05b..db8a8f696f50 100644
--- a/kernel/locking/semaphore.c
+++ b/kernel/locking/semaphore.c
@@ -40,7 +40,41 @@ static noinline int __down_interruptible(struct semaphore *sem);
static noinline int __down_killable(struct semaphore *sem);
static noinline int __down_timeout(struct semaphore *sem, long timeout);
static noinline void __up(struct semaphore *sem);
-static inline void __sem_acquire(struct semaphore *sem);
+
+#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
+static inline void hung_task_sem_set_holder(struct semaphore *sem)
+{
+ WRITE_ONCE((sem)->last_holder, (unsigned long)current);
+}
+
+static inline void hung_task_sem_clear_if_holder(struct semaphore *sem)
+{
+ if (READ_ONCE((sem)->last_holder) == (unsigned long)current)
+ WRITE_ONCE((sem)->last_holder, 0UL);
+}
+
+unsigned long sem_last_holder(struct semaphore *sem)
+{
+ return READ_ONCE(sem->last_holder);
+}
+#else
+static inline void hung_task_sem_set_holder(struct semaphore *sem)
+{
+}
+static inline void hung_task_sem_clear_if_holder(struct semaphore *sem)
+{
+}
+unsigned long sem_last_holder(struct semaphore *sem)
+{
+ return 0UL;
+}
+#endif
+
+static inline void __sem_acquire(struct semaphore *sem)
+{
+ sem->count--;
+ hung_task_sem_set_holder(sem);
+}
/**
* down - acquire the semaphore
@@ -188,10 +222,7 @@ void __sched up(struct semaphore *sem)
raw_spin_lock_irqsave(&sem->lock, flags);
-#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
- if (READ_ONCE(sem->last_holder) == (unsigned long)current)
- WRITE_ONCE(sem->last_holder, 0UL);
-#endif
+ hung_task_sem_clear_if_holder(sem);
if (likely(list_empty(&sem->wait_list)))
sem->count++;
@@ -233,9 +264,7 @@ static inline int __sched ___down_common(struct semaphore *sem, long state,
timeout = schedule_timeout(timeout);
raw_spin_lock_irq(&sem->lock);
if (waiter.up) {
-#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
- WRITE_ONCE(sem->last_holder, (unsigned long)current);
-#endif
+ hung_task_sem_set_holder(sem);
return 0;
}
}
@@ -254,17 +283,13 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
{
int ret;
-#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
hung_task_set_blocker(sem, BLOCKER_TYPE_SEM);
-#endif
trace_contention_begin(sem, 0);
ret = ___down_common(sem, state, timeout);
trace_contention_end(sem, ret);
-#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
hung_task_clear_blocker();
-#endif
return ret;
}
@@ -297,23 +322,3 @@ static noinline void __sched __up(struct semaphore *sem)
waiter->up = true;
wake_up_process(waiter->task);
}
-
-#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
-unsigned long sem_last_holder(struct semaphore *sem)
-{
- return READ_ONCE(sem->last_holder);
-}
-#else
-unsigned long sem_last_holder(struct semaphore *sem)
-{
- return 0UL;
-}
-#endif
-
-static inline void __sem_acquire(struct semaphore *sem)
-{
- sem->count--;
-#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
- WRITE_ONCE(sem->last_holder, (unsigned long)current);
-#endif
-}
---
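For completeness, here is a rough sketch of how the detector side is meant to
consume this. It is illustrative only: the helper name, the locking, and the
report format below are made up for this mail and are not the actual
hung_task.c code; only sem_last_holder() comes from the diff above.

#include <linux/sched.h>
#include <linux/semaphore.h>

/*
 * Illustrative sketch only: how a hung-task style checker could turn a
 * blocked task's semaphore pointer into a "blocked by" report using
 * sem_last_holder(). report_sem_blocker() is a made-up name and the
 * task lifetime handling is simplified away.
 */
static void report_sem_blocker(struct task_struct *hung, struct semaphore *sem)
{
	struct task_struct *holder;

	/*
	 * last_holder is only a hint: the recorded task may already have
	 * released the semaphore, or may not be the holder we are actually
	 * waiting on. It is still a useful starting point for debugging.
	 */
	holder = (struct task_struct *)sem_last_holder(sem);
	if (!holder)
		return;

	pr_err("INFO: task %s:%d blocked on a semaphore likely last held by task %s:%d\n",
	       hung->comm, hung->pid, holder->comm, holder->pid);
}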
[...]
> > static noinline void __down(struct semaphore *sem);
> > static noinline int __down_interruptible(struct semaphore *sem);
> > static noinline int __down_killable(struct semaphore *sem);
> > static noinline int __down_timeout(struct semaphore *sem, long timeout);
> > static noinline void __up(struct semaphore *sem);
> > +static inline void __sem_acquire(struct semaphore *sem);
>
> It feels Just Weird to forward declare a static inline. Is there a
> special reason for doing this?
Thanks for pointing this out.
Indeed, the forward declaration was weird :(
Fixed by removing it as shown in the diff above.
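For the archive, the general rule this relies on (a toy illustration, not code
from the patch): a static inline only has to be defined before its first use
in the same translation unit, so a separate forward declaration adds nothing.

/* Toy illustration, not from the patch: */
static inline int twice(int x)
{
	return 2 * x;
}

static int use_twice(int x)
{
	return twice(x);	/* fine: the definition already appears above */
}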
Thanks,
Lance