Message-ID: <20230819060915.3001568-9-jstultz@google.com>
Date: Sat, 19 Aug 2023 06:08:42 +0000
From: John Stultz <jstultz@...gle.com>
To: LKML <linux-kernel@...r.kernel.org>
Cc: John Stultz <jstultz@...gle.com>,
Joel Fernandes <joelaf@...gle.com>,
Qais Yousef <qyousef@...gle.com>,
Ingo Molnar <mingo@...hat.com>,
Peter Zijlstra <peterz@...radead.org>,
Juri Lelli <juri.lelli@...hat.com>,
Vincent Guittot <vincent.guittot@...aro.org>,
Dietmar Eggemann <dietmar.eggemann@....com>,
Valentin Schneider <vschneid@...hat.com>,
Steven Rostedt <rostedt@...dmis.org>,
Ben Segall <bsegall@...gle.com>,
Zimuzo Ezeozue <zezeozue@...gle.com>,
Youssef Esmat <youssefesmat@...gle.com>,
Mel Gorman <mgorman@...e.de>,
Daniel Bristot de Oliveira <bristot@...hat.com>,
Will Deacon <will@...nel.org>,
Waiman Long <longman@...hat.com>,
Boqun Feng <boqun.feng@...il.com>,
"Paul E . McKenney" <paulmck@...nel.org>, kernel-team@...roid.com
Subject: [PATCH v5 08/19] locking/mutex: Split blocked_on logic into two
states (blocked_on and blocked_on_waking)
This patch adds blocked_on_waking so we can track whether the
task should be allowed to try to acquire the lock separately from
which lock it is blocked on.

This avoids some of the subtle magic where the blocked_on state
gets cleared, only to have it re-added by the
mutex_lock_slowpath call when it tries to acquire the lock on
wakeup.

This should make dealing with the ww_mutex issue cleaner.
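To make the intended state transitions easier to follow, here is a
condensed, self-contained sketch of the two-state tracking. It is not
part of the patch: the struct and helpers below are simplified
stand-ins for task_struct, set_task_blocked_on() and task_is_blocked().

/*
 * Illustrative model only: a task counts as blocked while it waits on
 * a lock and has not yet been woken to retry the acquisition.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct mutex;                        /* opaque stand-in for struct mutex */

struct task {
	struct mutex *blocked_on;    /* lock we're blocked on */
	bool blocked_on_waking;      /* blocked on, but waking */
};

/* Setting (or clearing) the blocked-on lock also resets the waking flag. */
static void set_task_blocked_on(struct task *p, struct mutex *m)
{
	p->blocked_on = m;
	p->blocked_on_waking = false;
}

/* The unlock/die/wound paths only mark the waiter as waking. */
static void mark_task_waking(struct task *p)
{
	p->blocked_on_waking = true;
}

static bool task_is_blocked(const struct task *p)
{
	return p->blocked_on != NULL && !p->blocked_on_waking;
}

int main(void)
{
	struct mutex *lock = (struct mutex *)0x1;  /* dummy lock pointer */
	struct task waiter = { 0 };

	set_task_blocked_on(&waiter, lock);        /* blocks in lock slowpath */
	printf("blocked after enqueue:  %d\n", task_is_blocked(&waiter)); /* 1 */

	mark_task_waking(&waiter);                 /* woken by unlock path */
	printf("blocked after wakeup:   %d\n", task_is_blocked(&waiter)); /* 0 */

	waiter.blocked_on_waking = false;          /* retries the acquisition */
	printf("blocked on retry:       %d\n", task_is_blocked(&waiter)); /* 1 */

	set_task_blocked_on(&waiter, NULL);        /* lock acquired */
	printf("blocked after acquire:  %d\n", task_is_blocked(&waiter)); /* 0 */
	return 0;
}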
Cc: Joel Fernandes <joelaf@...gle.com>
Cc: Qais Yousef <qyousef@...gle.com>
Cc: Ingo Molnar <mingo@...hat.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Juri Lelli <juri.lelli@...hat.com>
Cc: Vincent Guittot <vincent.guittot@...aro.org>
Cc: Dietmar Eggemann <dietmar.eggemann@....com>
Cc: Valentin Schneider <vschneid@...hat.com>
Cc: Steven Rostedt <rostedt@...dmis.org>
Cc: Ben Segall <bsegall@...gle.com>
Cc: Zimuzo Ezeozue <zezeozue@...gle.com>
Cc: Youssef Esmat <youssefesmat@...gle.com>
Cc: Mel Gorman <mgorman@...e.de>
Cc: Daniel Bristot de Oliveira <bristot@...hat.com>
Cc: Will Deacon <will@...nel.org>
Cc: Waiman Long <longman@...hat.com>
Cc: Boqun Feng <boqun.feng@...il.com>
Cc: "Paul E . McKenney" <paulmck@...nel.org>
Cc: kernel-team@...roid.com
Signed-off-by: John Stultz <jstultz@...gle.com>
---
include/linux/sched.h | 2 ++
kernel/fork.c | 1 +
kernel/locking/mutex.c | 7 ++++---
kernel/locking/ww_mutex.h | 12 ++++++------
kernel/sched/sched.h | 12 ++++++++++++
5 files changed, 25 insertions(+), 9 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0f32bea47e5e..3b7f26df2496 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1141,6 +1141,7 @@ struct task_struct {
#endif
struct mutex *blocked_on; /* lock we're blocked on */
+ bool blocked_on_waking; /* blocked on, but waking */
raw_spinlock_t blocked_lock;
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
@@ -2241,6 +2242,7 @@ static inline void set_task_blocked_on(struct task_struct *p, struct mutex *m)
WARN_ON((!m && !p->blocked_on) || (m && p->blocked_on));
p->blocked_on = m;
+ p->blocked_on_waking = false;
}
static inline struct mutex *get_task_blocked_on(struct task_struct *p)
diff --git a/kernel/fork.c b/kernel/fork.c
index 8bad899b6c6e..5b11ead90b12 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2460,6 +2460,7 @@ __latent_entropy struct task_struct *copy_process(
#endif
p->blocked_on = NULL; /* not blocked yet */
+ p->blocked_on_waking = false; /* not blocked yet */
#ifdef CONFIG_BCACHE
p->sequential_io = 0;
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 04b0ea45cc01..687009eca2d1 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -666,10 +666,11 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
raw_spin_lock_irqsave(&lock->wait_lock, flags);
raw_spin_lock(&current->blocked_lock);
+
/*
- * Gets reset by unlock path().
+ * Clear blocked_on_waking flag set by the unlock path().
*/
- set_task_blocked_on(current, lock);
+ current->blocked_on_waking = false;
set_current_state(state);
/*
* Here we order against unlock; we must either see it change
@@ -948,7 +949,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
debug_mutex_wake_waiter(lock, waiter);
raw_spin_lock(&next->blocked_lock);
WARN_ON(next->blocked_on != lock);
- set_task_blocked_on(current, NULL);
+ next->blocked_on_waking = true;
raw_spin_unlock(&next->blocked_lock);
wake_q_add(&wake_q, next);
}
diff --git a/kernel/locking/ww_mutex.h b/kernel/locking/ww_mutex.h
index 44a532dda927..3b0a68d7e308 100644
--- a/kernel/locking/ww_mutex.h
+++ b/kernel/locking/ww_mutex.h
@@ -287,12 +287,12 @@ __ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
debug_mutex_wake_waiter(lock, waiter);
#endif
/*
- * When waking up the task to die, be sure to clear the
- * blocked_on pointer. Otherwise we can see circular
+ * When waking up the task to die, be sure to set the
+ * blocked_on_waking flag. Otherwise we can see circular
* blocked_on relationships that can't resolve.
*/
WARN_ON(waiter->task->blocked_on != lock);
- set_task_blocked_on(waiter->task, NULL);
+ waiter->task->blocked_on_waking = true;
wake_q_add(wake_q, waiter->task);
raw_spin_unlock(&waiter->task->blocked_lock);
}
@@ -345,11 +345,11 @@ static bool __ww_mutex_wound(struct MUTEX *lock,
/* nested as we should hold current->blocked_lock already */
raw_spin_lock_nested(&owner->blocked_lock, SINGLE_DEPTH_NESTING);
/*
- * When waking up the task to wound, be sure to clear the
- * blocked_on pointer. Otherwise we can see circular
+ * When waking up the task to wound, be sure to set the
+ * blocked_on_waking flag. Otherwise we can see circular
* blocked_on relationships that can't resolve.
*/
- set_task_blocked_on(owner, NULL);
+ owner->blocked_on_waking = true;
wake_q_add(wake_q, owner);
raw_spin_unlock(&owner->blocked_lock);
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 33ad47a093ae..95900ccaaf82 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2111,6 +2111,18 @@ static inline int task_current(struct rq *rq, struct task_struct *p)
return rq->curr == p;
}
+#ifdef CONFIG_PROXY_EXEC
+static inline bool task_is_blocked(struct task_struct *p)
+{
+ return !!p->blocked_on && !p->blocked_on_waking;
+}
+#else /* !PROXY_EXEC */
+static inline bool task_is_blocked(struct task_struct *p)
+{
+ return false;
+}
+#endif /* PROXY_EXEC */
+
static inline int task_on_cpu(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
--
2.42.0.rc1.204.g551eb34607-goog