Date:   Mon, 14 Nov 2022 11:24:17 -0500
From:   Jason Baron <jbaron@...mai.com>
To:     Dmitry Safonov <dima@...sta.com>, linux-kernel@...r.kernel.org,
        David Ahern <dsahern@...nel.org>,
        Eric Dumazet <edumazet@...gle.com>
Cc:     Bob Gilligan <gilligan@...sta.com>,
        "David S. Miller" <davem@...emloft.net>,
        Dmitry Safonov <0x7f454c46@...il.com>,
        Francesco Ruggeri <fruggeri@...sta.com>,
        Hideaki YOSHIFUJI <yoshfuji@...ux-ipv6.org>,
        Jakub Kicinski <kuba@...nel.org>,
        Paolo Abeni <pabeni@...hat.com>,
        Salam Noureddine <noureddine@...sta.com>,
        netdev@...r.kernel.org, Ard Biesheuvel <ardb@...nel.org>,
        Josh Poimboeuf <jpoimboe@...nel.org>,
        Peter Zijlstra <peterz@...radead.org>,
        Steven Rostedt <rostedt@...dmis.org>
Subject: Re: [PATCH v3 1/3] jump_label: Prevent key->enabled int overflow


On 11/11/22 16:23, Dmitry Safonov wrote:
> diff --git a/kernel/jump_label.c b/kernel/jump_label.c
> index 714ac4c3b556..f2c1aa351d41 100644
> --- a/kernel/jump_label.c
> +++ b/kernel/jump_label.c
> @@ -113,11 +113,38 @@ int static_key_count(struct static_key *key)
>  }
>  EXPORT_SYMBOL_GPL(static_key_count);
>  
> -void static_key_slow_inc_cpuslocked(struct static_key *key)
> +/***
> + * static_key_fast_inc - adds a user for a static key
> + * @key: static key that must be already enabled
> + *
> + * The caller must make sure that the static key can't get disabled while
> + * in this function. It doesn't patch jump labels, only adds a user to
> + * an already enabled static key.
> + *
> + * Returns true if the increment was done.
> + */
> +bool static_key_fast_inc(struct static_key *key)
>  {
>  	int v, v1;
>  
>  	STATIC_KEY_CHECK_USE(key);
> +	/*
> +	 * Negative key->enabled has a special meaning: it sends
> +	 * static_key_slow_inc() down the slow path, and it is non-zero
> +	 * so it counts as "enabled" in jump_label_update().  Note that
> +	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
> +	 */
> +	for (v = atomic_read(&key->enabled); v > 0 && (v + 1) > 0; v = v1) {
> +		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
> +		if (likely(v1 == v))
> +			return true;
> +	}
> +	return false;
> +}
> +EXPORT_SYMBOL_GPL(static_key_fast_inc);
> +
> +bool static_key_slow_inc_cpuslocked(struct static_key *key)
> +{
>  	lockdep_assert_cpus_held();
>  
>  	/*
> @@ -126,17 +153,9 @@ void static_key_slow_inc_cpuslocked(struct static_key *key)
>  	 * jump_label_update() process.  At the same time, however,
>  	 * the jump_label_update() call below wants to see
>  	 * static_key_enabled(&key) for jumps to be updated properly.
> -	 *
> -	 * So give a special meaning to negative key->enabled: it sends
> -	 * static_key_slow_inc() down the slow path, and it is non-zero
> -	 * so it counts as "enabled" in jump_label_update().  Note that
> -	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
>  	 */
> -	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
> -		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
> -		if (likely(v1 == v))
> -			return;
> -	}
> +	if (static_key_fast_inc(key))
> +		return true;
>  
>  	jump_label_lock();
>  	if (atomic_read(&key->enabled) == 0) {
> @@ -148,16 +167,23 @@ void static_key_slow_inc_cpuslocked(struct static_key *key)
>  		 */
>  		atomic_set_release(&key->enabled, 1);
>  	} else {
> -		atomic_inc(&key->enabled);
> +		if (WARN_ON_ONCE(static_key_fast_inc(key))) {

Shouldn't that be negated to catch the overflow:

if (WARN_ON_ONCE(!static_key_fast_inc(key)))
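
With that negation the else branch would read (just a sketch, assuming the rest
of the hunk stays as posted):

	} else {
		if (WARN_ON_ONCE(!static_key_fast_inc(key))) {
			jump_label_unlock();
			return false;
		}
	}

i.e. the WARN fires only when static_key_fast_inc() refuses the increment
because key->enabled is already saturated, and that failure is then propagated
to the caller.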



> +			jump_label_unlock();
> +			return false;
> +		}
>  	}
>  	jump_label_unlock();
> +	return true;
>  }
>  
> -void static_key_slow_inc(struct static_key *key)
> +bool static_key_slow_inc(struct static_key *key)
>  {
> +	bool ret;
> +
>  	cpus_read_lock();
> -	static_key_slow_inc_cpuslocked(key);
> +	ret = static_key_slow_inc_cpuslocked(key);
>  	cpus_read_unlock();
> +	return ret;
>  }
>  EXPORT_SYMBOL_GPL(static_key_slow_inc);
>  
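
For reference, with the v > 0 && (v + 1) > 0 guard in static_key_fast_inc(),
the increment is refused once key->enabled reaches INT_MAX, which is exactly
what the new bool return lets callers observe. A hypothetical caller sketch,
purely for illustration (example_take_key_ref() and the -EOVERFLOW choice are
not part of this series):

	static int example_take_key_ref(struct static_key *key)
	{
		/* Refuse the reference instead of silently saturating the count. */
		if (!static_key_slow_inc(key))
			return -EOVERFLOW;
		return 0;
	}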
