Message-ID: <57AC9347.1060105@hpe.com>
Date:	Thu, 11 Aug 2016 11:01:27 -0400
From:	Waiman Long <waiman.long@....com>
To:	Waiman Long <Waiman.Long@....com>
CC:	Ingo Molnar <mingo@...hat.com>,
	Peter Zijlstra <peterz@...radead.org>,
	<linux-kernel@...r.kernel.org>,
	Linus Torvalds <torvalds@...ux-foundation.org>,
	Ding Tianhong <dingtianhong@...wei.com>,
	Jason Low <jason.low2@....com>,
	Davidlohr Bueso <dave@...olabs.net>,
	"Paul E. McKenney" <paulmck@...ibm.com>,
	Thomas Gleixner <tglx@...utronix.de>,
	Will Deacon <Will.Deacon@....com>,
	Tim Chen <tim.c.chen@...ux.intel.com>,
	Imre Deak <imre.deak@...el.com>
Subject: Re: [PATCH v5 3/3] locking/mutex: Ensure forward progress of waiter-spinner

On 08/10/2016 02:25 PM, Waiman Long wrote:
> As both an optimistic spinner and a waiter-spinner (a woken task from
> the wait queue spinning) can be spinning on the lock at the same time,
> we cannot ensure forward progress for the waiter-spinner. It is
> therefore possible, though unlikely, for the waiter-spinner to be
> starved of the lock.
>
> This patch adds a flag to indicate that a waiter-spinner is spinning
> and hence has priority in acquiring the lock. A waiter-spinner sets
> this flag while spinning. An optimistic spinner will check this flag
> and yield if it is set. This essentially makes the waiter-spinner
> jump to the head of the optimistic spinning queue to acquire the
> lock.
>
> There will be no increase in the size of the mutex structure on
> 64-bit architectures, as there is an existing 4-byte hole. On 32-bit
> architectures, the size will increase by 4 bytes.
>
> Signed-off-by: Waiman Long <Waiman.Long@....com>
> ---
>   include/linux/mutex.h  |    1 +
>   kernel/locking/mutex.c |   36 +++++++++++++++++++++++++++---------
>   2 files changed, 28 insertions(+), 9 deletions(-)
>
> diff --git a/include/linux/mutex.h b/include/linux/mutex.h
> index 2cb7531..f8e91ad 100644
> --- a/include/linux/mutex.h
> +++ b/include/linux/mutex.h
> @@ -57,6 +57,7 @@ struct mutex {
>   #endif
>   #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
>   	struct optimistic_spin_queue osq; /* Spinner MCS lock */
> +	int waiter_spinning;
>   #endif
>   #ifdef CONFIG_DEBUG_MUTEXES
>   	void			*magic;
> diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
> index 15b521a..0912964 100644
> --- a/kernel/locking/mutex.c
> +++ b/kernel/locking/mutex.c
> @@ -55,6 +55,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
>   	mutex_clear_owner(lock);
>   #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
>   	osq_lock_init(&lock->osq);
> +	lock->waiter_spinning = false;
>   #endif
>
>   	debug_mutex_init(lock, name, key);
> @@ -337,9 +338,21 @@ static bool mutex_optimistic_spin(struct mutex *lock,
>   		 */
>   		if (!osq_lock(&lock->osq))
>   			goto done;
> +	} else {
> +		/*
> +		 * Turn on the waiter spinning flag to discourage the spinner
> +		 * from getting the lock.
> +		 */
> +		lock->waiter_spinning = true;
>   	}
>
> -	while (true) {
> +	/*
> +	 * The cpu_relax_lowlatency() call is a compiler barrier which forces
> +	 * everything in this loop to be re-loaded. We don't need memory
> +	 * barriers as we'll eventually observe the right values at the cost
> +	 * of a few extra spins.
> +	 */
> +	for (;; cpu_relax_lowlatency()) {
>   		struct task_struct *owner;
>
>   		if (use_ww_ctx && ww_ctx->acquired > 0) {
> @@ -359,6 +372,17 @@ static bool mutex_optimistic_spin(struct mutex *lock,
>   		}
>
>   		/*
> +		 * A regular optimistic spinner waits until the
> +		 * waiter_spinning flag is cleared. This ensures forward
> +		 * progress for the waiter-spinner.
> +		 */
> +		if (!waiter && READ_ONCE(lock->waiter_spinning)) {
> +			if (need_resched())
> +				break;
> +			continue;
> +		}
> +
> +		/*
>   		 * If there's an owner, wait for it to either
>   		 * release the lock or go to sleep.
>   		 */
> @@ -390,18 +414,12 @@ static bool mutex_optimistic_spin(struct mutex *lock,
>   		 */
>   		if (!owner && (need_resched() || rt_task(task)))
>   			break;
> -
> -		/*
> -		 * The cpu_relax() call is a compiler barrier which forces
> -		 * everything in this loop to be re-loaded. We don't need
> -		 * memory barriers as we'll eventually observe the right
> -		 * values at the cost of a few extra spins.
> -		 */
> -		cpu_relax_lowlatency();
>   	}
>
>   	if (!waiter)
>   		osq_unlock(&lock->osq);
> +	else
> +		lock->waiter_spinning = false;
>   done:
>   	/*
>   	 * If we fell out of the spin path because of need_resched(),


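(For intuition, the handoff described in the changelog above can be
modeled in userspace. The sketch below is an illustration only: C11
atomics, pthreads and a bare exchange loop stand in for the kernel's
mutex fast path, osq lock and cpu_relax_lowlatency(), and all of the
names are made up for the example.)

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int lock_word;		/* 0 = free, 1 = held */
static atomic_int waiter_spinning;	/* set while a waiter-spinner spins */

static void lock_as_waiter(void)
{
	/* Claim priority, then spin until the lock is acquired. */
	atomic_store(&waiter_spinning, 1);
	while (atomic_exchange(&lock_word, 1))
		;
	atomic_store(&waiter_spinning, 0);
}

static int trylock_as_opt_spinner(void)
{
	/* Yield while a waiter-spinner has priority. */
	if (atomic_load(&waiter_spinning))
		return 0;
	return !atomic_exchange(&lock_word, 1);
}

static void unlock(void)
{
	atomic_store(&lock_word, 0);
}

static void *waiter(void *arg)
{
	lock_as_waiter();
	puts("waiter-spinner acquired the lock");
	unlock();
	return arg;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	while (!trylock_as_opt_spinner())
		;	/* the kernel spins with cpu_relax_lowlatency() here */
	puts("optimistic spinner acquired the lock");
	unlock();
	pthread_join(t, NULL);
	return 0;
}

(Here the atomic loads provide the reload guarantee that the kernel
instead gets from the compiler barrier in cpu_relax_lowlatency(), at
the cost of a few extra spins either way.)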
The following is the updated patch, which should fix the build error
on non-x86 platforms.

Cheers,
Longman

================================ cut here ================================

locking/mutex: Ensure forward progress of waiter-spinner

As both an optimistic spinner and a waiter-spinner (a woken task from
the wait queue spinning) can be spinning on the lock at the same time,
we cannot ensure forward progress for the waiter-spinner. It is
therefore possible, though unlikely, for the waiter-spinner to be
starved of the lock.

This patch adds a flag to indicate that a waiter-spinner is spinning
and hence has priority in acquiring the lock. A waiter-spinner sets
this flag while spinning. An optimistic spinner will check this flag
and yield if it is set. This essentially makes the waiter-spinner
jump to the head of the optimistic spinning queue to acquire the
lock.

There will be no increase in the size of the mutex structure on
64-bit architectures, as there is an existing 4-byte hole. On 32-bit
architectures, the size will increase by 4 bytes.

Signed-off-by: Waiman Long <Waiman.Long@....com>
---
  include/linux/mutex.h  |    1 +
  kernel/locking/mutex.c |   21 +++++++++++++++++++++
  2 files changed, 22 insertions(+), 0 deletions(-)

diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 2cb7531..f8e91ad 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -57,6 +57,7 @@ struct mutex {
  #endif
  #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
      struct optimistic_spin_queue osq; /* Spinner MCS lock */
+    int waiter_spinning;
  #endif
  #ifdef CONFIG_DEBUG_MUTEXES
      void            *magic;
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 15b521a..02d8029 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -55,6 +55,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
      mutex_clear_owner(lock);
  #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
      osq_lock_init(&lock->osq);
+    lock->waiter_spinning = false;
  #endif

      debug_mutex_init(lock, name, key);
@@ -337,6 +338,12 @@ static bool mutex_optimistic_spin(struct mutex *lock,
           */
          if (!osq_lock(&lock->osq))
              goto done;
+    } else {
+        /*
+         * Turn on the waiter spinning flag to discourage the spinner
+         * from getting the lock.
+         */
+        lock->waiter_spinning = true;
      }

      while (true) {
@@ -359,6 +366,17 @@ static bool mutex_optimistic_spin(struct mutex *lock,
          }

          /*
+         * A regular optimistic spinner waits until the
+         * waiter_spinning flag is cleared. This ensures forward
+         * progress for the waiter-spinner.
+         */
+        if (!waiter && READ_ONCE(lock->waiter_spinning)) {
+            if (need_resched())
+                break;
+            goto relax;
+        }
+
+        /*
           * If there's an owner, wait for it to either
           * release the lock or go to sleep.
           */
@@ -391,6 +409,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
          if (!owner && (need_resched() || rt_task(task)))
              break;

+relax:
          /*
           * The cpu_relax() call is a compiler barrier which forces
           * everything in this loop to be re-loaded. We don't need
@@ -402,6 +421,8 @@ static bool mutex_optimistic_spin(struct mutex *lock,

      if (!waiter)
          osq_unlock(&lock->osq);
+    else
+        lock->waiter_spinning = false;
  done:
      /*
       * If we fell out of the spin path because of need_resched(),
-- 
1.7.1
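
A quick way to check the size claim in the changelog is a standalone
layout test. This is a sketch with simplified stand-in fields (the
real struct mutex layout depends on the config options selected), not
the actual kernel structure:

#include <stdio.h>

struct mutex_before {
	int	count;			/* stands in for atomic_t count */
	int	wait_lock;		/* stands in for spinlock_t */
	void	*wait_list_head;	/* stands in for struct list_head */
	void	*wait_list_tail;
	void	*owner;
	int	osq;			/* optimistic_spin_queue is one int */
	/* 4-byte hole here on 64-bit: the next member is pointer-aligned */
	void	*magic;
};

struct mutex_after {
	int	count;
	int	wait_lock;
	void	*wait_list_head;
	void	*wait_list_tail;
	void	*owner;
	int	osq;
	int	waiter_spinning;	/* fills the hole */
	void	*magic;
};

int main(void)
{
	printf("before: %zu  after: %zu\n",
	       sizeof(struct mutex_before), sizeof(struct mutex_after));
	return 0;
}

On a 64-bit build both sizes come out the same (the new int lands in
the alignment hole before the pointer-aligned member); on a 32-bit
build the second struct is 4 bytes larger, matching the changelog.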
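
Separately, the comment about cpu_relax_lowlatency() acting as a
compiler barrier can be demonstrated in miniature. This is an
illustration of the idiom, not kernel code; the empty asm with a
"memory" clobber is what the kernel's barrier() macro expands to:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static int flag;	/* plain int on purpose, mirroring the kernel idiom */

static void *setter(void *arg)
{
	sleep(1);
	__atomic_store_n(&flag, 1, __ATOMIC_RELAXED);
	return arg;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, setter, NULL);
	/*
	 * Without the barrier the compiler may keep 'flag' cached in a
	 * register, and this loop would never observe the store.
	 */
	while (!flag)
		asm volatile("" ::: "memory");
	puts("saw the flag");
	pthread_join(t, NULL);
	return 0;
}

(Strictly speaking, C11 calls this unsynchronized access a data race;
the kernel's spin loop accepts that and relies on eventually observing
the right values, which is the "few extra spins" trade-off the comment
describes.)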
