Message-ID: <1468969470.10247.15.camel@j-VirtualBox>
Date: Tue, 19 Jul 2016 16:04:30 -0700
From: Jason Low <jason.low2@....com>
To: imre.deak@...el.com
Cc: jason.low2@....com, Peter Zijlstra <peterz@...radead.org>,
linux-kernel@...r.kernel.org, Ingo Molnar <mingo@...hat.com>,
Chris Wilson <chris@...is-wilson.co.uk>,
Daniel Vetter <daniel.vetter@...el.com>,
Waiman Long <Waiman.Long@....com>,
Davidlohr Bueso <dave@...olabs.net>, jason.low2@...com
Subject: [RFC] Avoid mutex starvation when optimistic spinning is disabled
Hi Imre,
Here is a patch which prevents a thread from spending too much "time"
(in terms of trips through the slowpath wait loop) waiting for a mutex
in the !CONFIG_MUTEX_SPIN_ON_OWNER case.
Would you like to try this out and see if this addresses the mutex
starvation issue you are seeing in your workload when optimistic
spinning is disabled?
Thanks.
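
The idea in a nutshell: if a waiter has gone through the slowpath wait
loop too many times without getting the mutex, it sets a per-mutex
flag; while that flag is set, new lockers skip the fastpath and take
the slowpath instead, so the starved waiter can finally win, and
whichever task then acquires the mutex clears the flag.

Below is a rough user-space sketch of the same scheme, only to
illustrate the control flow. It is a toy pthread-based lock with
made-up names (toy_mutex, toy_trylock, ...), not the kernel code:

/*
 * Toy user-space model of the scheme (illustrative only: a
 * pthread-based toy lock with made-up names, not the kernel mutex).
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TOY_MAX_WAIT 16                 /* analogous to MUTEX_MAX_WAIT */

struct toy_mutex {
        atomic_int count;               /* 1 = unlocked, 0 = locked */
        atomic_bool yield_to_waiter;    /* set by a starving waiter */
        pthread_mutex_t wait_lock;      /* protects the sleeping waiters */
        pthread_cond_t wait_cv;
};

static void toy_mutex_init(struct toy_mutex *m)
{
        atomic_init(&m->count, 1);
        atomic_init(&m->yield_to_waiter, false);
        pthread_mutex_init(&m->wait_lock, NULL);
        pthread_cond_init(&m->wait_cv, NULL);
}

/* "fastpath": a single atomic 1->0 transition, no queueing */
static bool toy_trylock(struct toy_mutex *m)
{
        int unlocked = 1;

        return atomic_compare_exchange_strong(&m->count, &unlocked, 0);
}

/* "slowpath": sleep and retry, counting how often we lose the race */
static void toy_lock_slowpath(struct toy_mutex *m)
{
        int loops = 0;

        pthread_mutex_lock(&m->wait_lock);
        while (!toy_trylock(m)) {
                /* do_yield_to_waiter(): flag that we are starving */
                if (++loops >= TOY_MAX_WAIT)
                        atomic_store(&m->yield_to_waiter, true);
                pthread_cond_wait(&m->wait_cv, &m->wait_lock);
        }
        /* we finally got the lock: let everyone use the fastpath again */
        atomic_store(&m->yield_to_waiter, false);
        pthread_mutex_unlock(&m->wait_lock);
}

static void toy_lock(struct toy_mutex *m)
{
        /* need_yield_to_waiter(): skip the fastpath while someone starves */
        if (!atomic_load(&m->yield_to_waiter) && toy_trylock(m))
                return;
        toy_lock_slowpath(m);
}

static void toy_unlock(struct toy_mutex *m)
{
        pthread_mutex_lock(&m->wait_lock);
        atomic_store(&m->count, 1);
        pthread_cond_signal(&m->wait_cv);
        pthread_mutex_unlock(&m->wait_lock);
}

static struct toy_mutex demo_lock;
static long total;                      /* protected by the toy lock */

static void *hammer(void *arg)
{
        (void)arg;
        for (int i = 0; i < 100000; i++) {
                toy_lock(&demo_lock);
                total++;
                toy_unlock(&demo_lock);
        }
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        toy_mutex_init(&demo_lock);
        pthread_create(&a, NULL, hammer, NULL);
        pthread_create(&b, NULL, hammer, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("acquisitions: %ld (expect 200000)\n", total);
        return 0;
}

In the actual patch these pieces map to the new yield_to_waiter field,
the do_yield_to_waiter()/need_yield_to_waiter() helpers, and the flag
clearing after the wait loop in __mutex_lock_common().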
---
Signed-off-by: Jason Low <jason.low2@....com>
---
include/linux/mutex.h | 2 ++
kernel/locking/mutex.c | 61 +++++++++++++++++++++++++++++++++++++++++++++-----
2 files changed, 58 insertions(+), 5 deletions(-)
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 2cb7531..c1ca68d 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -57,6 +57,8 @@ struct mutex {
#endif
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
struct optimistic_spin_queue osq; /* Spinner MCS lock */
+#else
+ bool yield_to_waiter; /* Prevent starvation when spinning disabled */
#endif
#ifdef CONFIG_DEBUG_MUTEXES
void *magic;
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index a70b90d..fec78a7 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -55,6 +55,8 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
osq_lock_init(&lock->osq);
+#else
+ lock->yield_to_waiter = false;
#endif
debug_mutex_init(lock, name, key);
@@ -71,6 +73,9 @@ EXPORT_SYMBOL(__mutex_init);
*/
__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
+
+static inline bool need_yield_to_waiter(struct mutex *lock);
+
/**
* mutex_lock - acquire the mutex
* @lock: the mutex to be acquired
@@ -95,11 +100,15 @@ __visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
void __sched mutex_lock(struct mutex *lock)
{
might_sleep();
+
/*
* The locking fastpath is the 1->0 transition from
* 'unlocked' into 'locked' state.
*/
- __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
+ if (!need_yield_to_waiter(lock))
+ __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
+ else
+ __mutex_lock_slowpath(&lock->count);
mutex_set_owner(lock);
}
@@ -398,12 +407,39 @@ done:
return false;
}
+
+static inline void do_yield_to_waiter(struct mutex *lock, int loops)
+{
+ return;
+}
+
+static inline bool need_yield_to_waiter(struct mutex *lock)
+{
+ return false;
+}
+
#else
static bool mutex_optimistic_spin(struct mutex *lock,
struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
return false;
}
+
+#define MUTEX_MAX_WAIT 16
+
+static inline void do_yield_to_waiter(struct mutex *lock, int loops)
+{
+ if (loops < MUTEX_MAX_WAIT)
+ return;
+
+ if (!lock->yield_to_waiter)
+ lock->yield_to_waiter = true;
+}
+
+static inline bool need_yield_to_waiter(struct mutex *lock)
+{
+ return lock->yield_to_waiter;
+}
#endif
__visible __used noinline
@@ -510,6 +546,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
struct mutex_waiter waiter;
unsigned long flags;
int ret;
+ int loop = 0;
if (use_ww_ctx) {
struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
@@ -546,6 +583,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
lock_contended(&lock->dep_map, ip);
for (;;) {
+ loop++;
+
/*
* Lets try to take the lock again - this is needed even if
* we get here for the first time (shortly after failing to
@@ -580,6 +619,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
/* didn't get the lock, go to sleep: */
spin_unlock_mutex(&lock->wait_lock, flags);
schedule_preempt_disabled();
+ do_yield_to_waiter(lock, loop);
spin_lock_mutex(&lock->wait_lock, flags);
}
__set_task_state(task, TASK_RUNNING);
@@ -590,6 +630,11 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
atomic_set(&lock->count, 0);
debug_mutex_free_waiter(&waiter);
+#ifndef CONFIG_MUTEX_SPIN_ON_OWNER
+ if (lock->yield_to_waiter)
+ lock->yield_to_waiter = false;
+#endif
+
skip_wait:
/* got the lock - cleanup and rejoice! */
lock_acquired(&lock->dep_map, ip);
@@ -789,10 +834,13 @@ __mutex_lock_interruptible_slowpath(struct mutex *lock);
*/
int __sched mutex_lock_interruptible(struct mutex *lock)
{
- int ret;
+ int ret = 1;
might_sleep();
- ret = __mutex_fastpath_lock_retval(&lock->count);
+
+ if (!need_yield_to_waiter(lock))
+ ret = __mutex_fastpath_lock_retval(&lock->count);
+
if (likely(!ret)) {
mutex_set_owner(lock);
return 0;
@@ -804,10 +852,13 @@ EXPORT_SYMBOL(mutex_lock_interruptible);
int __sched mutex_lock_killable(struct mutex *lock)
{
- int ret;
+ int ret = 1;
might_sleep();
- ret = __mutex_fastpath_lock_retval(&lock->count);
+
+ if (!need_yield_to_waiter(lock))
+ ret = __mutex_fastpath_lock_retval(&lock->count);
+
if (likely(!ret)) {
mutex_set_owner(lock);
return 0;
--
1.9.1