Date:   Mon, 31 Jul 2017 11:36:12 +0200
From:   Peter Zijlstra <peterz@...radead.org>
To:     torvalds@...ux-foundation.org, linux-kernel@...r.kernel.org,
        tglx@...utronix.de, mingo@...nel.org, pbonzini@...hat.com,
        dvyukov@...gle.com, hpa@...or.com
Cc:     linux-tip-commits@...r.kernel.org,
        Steven Rostedt <rostedt@...dmis.org>
Subject: Re: [tip:locking/urgent] locking/static_key: Fix concurrent
 static_key_slow_inc()

On Fri, Jun 24, 2016 at 01:59:06AM -0700, tip-bot for Paolo Bonzini wrote:
> +++ b/kernel/jump_label.c
> @@ -58,13 +58,36 @@ static void jump_label_update(struct static_key *key);
>  
>  void static_key_slow_inc(struct static_key *key)
>  {
> +	int v, v1;
> +
>  	STATIC_KEY_CHECK_USE();
> -	if (atomic_inc_not_zero(&key->enabled))
> -		return;
> +
> +	/*
> +	 * Careful if we get concurrent static_key_slow_inc() calls;
> +	 * later calls must wait for the first one to _finish_ the
> +	 * jump_label_update() process.  At the same time, however,
> +	 * the jump_label_update() call below wants to see
> +	 * static_key_enabled(&key) for jumps to be updated properly.
> +	 *
> +	 * So give a special meaning to negative key->enabled: it sends
> +	 * static_key_slow_inc() down the slow path, and it is non-zero
> +	 * so it counts as "enabled" in jump_label_update().  Note that
> +	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
> +	 */
> +	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
> +		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
> +		if (likely(v1 == v))
> +			return;
> +	}
>  
>  	jump_label_lock();
> -	if (atomic_inc_return(&key->enabled) == 1)
> +	if (atomic_read(&key->enabled) == 0) {
> +		atomic_set(&key->enabled, -1);
>  		jump_label_update(key);
> +		atomic_set(&key->enabled, 1);
> +	} else {
> +		atomic_inc(&key->enabled);
> +	}
>  	jump_label_unlock();
>  }
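
For reference, the scheme that commit introduced can be modeled in plain
userspace C, which may help when reading the change below. This is only an
illustrative sketch with C11 atomics; fake_jump_label_update() and jl_lock
are made-up stand-ins for jump_label_update() and jump_label_lock(), not
kernel APIs:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int enabled = 0;	/* models key->enabled */
	static pthread_mutex_t jl_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Stand-in for jump_label_update(): the expensive patching step. */
	static void fake_jump_label_update(void)
	{
		printf("patching text, enabled=%d\n", atomic_load(&enabled));
	}

	static void model_slow_inc(void)
	{
		int v = atomic_load(&enabled);

		/*
		 * Fast path: take it only while enabled is strictly
		 * positive, i.e. some earlier caller has _finished_
		 * patching.  Zero or the special -1 (patching in
		 * progress) falls through to the slow path below.
		 */
		while (v > 0) {
			if (atomic_compare_exchange_weak(&enabled, &v, v + 1))
				return;
		}

		pthread_mutex_lock(&jl_lock);
		if (atomic_load(&enabled) == 0) {
			atomic_store(&enabled, -1);	/* patch in progress */
			fake_jump_label_update();
			atomic_store(&enabled, 1);	/* publish the result */
		} else {
			atomic_fetch_add(&enabled, 1);	/* already patched */
		}
		pthread_mutex_unlock(&jl_lock);
	}

	int main(void)
	{
		model_slow_inc();
		model_slow_inc();
		printf("final enabled=%d\n", atomic_load(&enabled));
		return 0;
	}

Compiled with "gcc -pthread", two back-to-back calls print a single
"patching" line and leave enabled at 2, matching the intended state machine.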


So I was recently looking at this again and got all paranoid. Do we want
something like so?

---
 kernel/jump_label.c | 24 +++++++++++++++++-------
 1 file changed, 17 insertions(+), 7 deletions(-)

diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index d11c506a6ac3..69d07e78e48b 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -103,7 +103,7 @@ EXPORT_SYMBOL_GPL(static_key_disable);
 
 void static_key_slow_inc(struct static_key *key)
 {
-	int v, v1;
+	int v;
 
 	STATIC_KEY_CHECK_USE();
 
@@ -119,18 +119,28 @@ void static_key_slow_inc(struct static_key *key)
 	 * so it counts as "enabled" in jump_label_update().  Note that
 	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
 	 */
-	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
-		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
-		if (likely(v1 == v))
+	for (v = atomic_read(&key->enabled); v > 0;) {
+		if (atomic_try_cmpxchg(&key->enabled, &v, v + 1))
 			return;
 	}
 
 	cpus_read_lock();
 	jump_label_lock();
-	if (atomic_read(&key->enabled) == 0) {
-		atomic_set(&key->enabled, -1);
+
+	if (!atomic_cmpxchg(&key->enabled, 0, -1)) {
+		/*
+		 * The successful cmpxchg implies a full smp_mb: the -1 must
+		 * be visible before we start changing the text.
+		 */
 		jump_label_update(key);
-		atomic_set(&key->enabled, 1);
+
+		/*
+		 * smp_mb, such that we finish modifying text before enabling
+		 * the fast path. Probably not needed because modifying text is
+		 * likely to serialize everything. Be paranoid.
+		 */
+		smp_mb__before_atomic();
+		atomic_add(2, &key->enabled); /* -1 -> 1 */
 	} else {
 		atomic_inc(&key->enabled);
 	}
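
Two notes on the above, for anyone not yet familiar with the newer
primitive. First, atomic_try_cmpxchg() differs from atomic_cmpxchg() in
that it takes a pointer to the expected value and, on failure, writes the
freshly observed value back through it; that is why the retry loop no
longer needs v1 or a second read. A userspace C11 analogue (illustrative
only, not kernel code):

	#include <stdatomic.h>
	#include <stdio.h>

	int main(void)
	{
		atomic_int enabled = 1;
		int v = atomic_load(&enabled);

		while (v > 0) {
			/*
			 * On failure the observed value lands in v, so
			 * the loop re-tests it without an extra load.
			 */
			if (atomic_compare_exchange_weak(&enabled, &v, v + 1))
				break;
		}
		printf("enabled=%d\n", atomic_load(&enabled)); /* prints 2 */
		return 0;
	}

Second, the atomic_add(2, ...) takes enabled from -1 to 1 and is safe
because, while enabled is negative, every other caller is parked on
jump_label_lock(): the fast-path loop only accepts v > 0, so nobody else
can move the counter in the meantime.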
