Message-ID: <YoS0Pn9IotUrQh01@alley>
Date:   Wed, 18 May 2022 10:54:22 +0200
From:   Petr Mladek <pmladek@...e.com>
To:     "Jason A. Donenfeld" <Jason@...c4.com>
Cc:     linux-kernel@...r.kernel.org, linux-crypto@...r.kernel.org,
        Steven Rostedt <rostedt@...dmis.org>,
        Sergey Senozhatsky <senozhatsky@...omium.org>
Subject: Re: [PATCH] random: remove mostly unused async readiness notifier

On Sun 2022-05-15 15:19:27, Jason A. Donenfeld wrote:
> The register_random_ready_notifier() notifier is somewhat complicated,
> and was already recently rewritten to use notifier blocks. It is now
> used by only one consumer in the kernel, vsprintf.c, for which the
> async mechanism is really overly complex for what it actually needs.
> This commit removes register_random_ready_notifier() and
> unregister_random_ready_notifier(), because they just add complication
> with little utility, and changes vsprintf.c to simply check
> `!rng_is_initialized() && !rng_has_arch_random()`, which will
> eventually be true. Performance-wise, that code was already using a
> static branch, so there's basically no overhead at all to this change.
> 
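(Side note for context: a static key like the one below compiles the
check down to a single runtime-patched instruction, a jump while the
key is off and a nop once it is enabled, which is why the commit
message can claim essentially zero overhead. A minimal sketch of the
pattern, with hypothetical names:)

	#include <linux/jump_label.h>

	static DEFINE_STATIC_KEY_FALSE(my_key);

	/* Runs once, from preemptible context: patches the branch sites. */
	static void setup_done(void)
	{
		static_branch_enable(&my_key);
	}

	static int fast_path(void)
	{
		/* A patched jump/nop, not a memory load and compare. */
		if (!static_branch_likely(&my_key))
			return -EAGAIN;	/* rare: only until setup_done() runs */
		return 0;
	}
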
> --- a/lib/vsprintf.c
> +++ b/lib/vsprintf.c
> @@ -750,60 +750,37 @@ static int __init debug_boot_weak_hash_enable(char *str)
>  }
>  early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
>  
> -static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
> -static siphash_key_t ptr_key __read_mostly;
> +static DEFINE_STATIC_KEY_FALSE(filled_random_ptr_key);
>  
>  static void enable_ptr_key_workfn(struct work_struct *work)
>  {
> -	get_random_bytes(&ptr_key, sizeof(ptr_key));
> -	/* Needs to run from preemptible context */
> -	static_branch_disable(&not_filled_random_ptr_key);
> +	static_branch_enable(&filled_random_ptr_key);
>  }
>  
> -static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
> -
> -static int fill_random_ptr_key(struct notifier_block *nb,
> -			       unsigned long action, void *data)
> +/* Maps a pointer to a 32 bit unique identifier. */
> +static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
>  {
> -	/* This may be in an interrupt handler. */
> -	queue_work(system_unbound_wq, &enable_ptr_key_work);
> -	return 0;
> -}
> -
> -static struct notifier_block random_ready = {
> -	.notifier_call = fill_random_ptr_key
> -};
> +	static siphash_key_t ptr_key __read_mostly;
> +	unsigned long hashval;
>  
> -static int __init initialize_ptr_random(void)
> -{
> -	int ret;
> +	if (!static_branch_likely(&filled_random_ptr_key)) {
> +		static bool filled = false;
> +		static DEFINE_SPINLOCK(filling);
> +		static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
> +		unsigned long flags;
>  
> -	/* Don't bother waiting for RNG to be ready if RDRAND is mixed in already. */
> -	if (rng_has_arch_random()) {
> -		enable_ptr_key_workfn(&enable_ptr_key_work);
> -		return 0;
> -	}
> +		if (!rng_is_initialized() && !rng_has_arch_random())
> +			return -EAGAIN;
>  
> -	ret = register_random_ready_notifier(&random_ready);
> -	if (!ret) {
> -		return 0;
> -	} else if (ret == -EALREADY) {
> -		/* This is in preemptible context */
> -		enable_ptr_key_workfn(&enable_ptr_key_work);
> -		return 0;
> +		spin_lock_irqsave(&filling, flags);

I thought more about this and there is a small risk of a deadlock
here: get_random_bytes(), queue_work(), or an NMI might call
printk()/vsprintf() with %p while the "filling" lock is held.
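
Concretely, a (hypothetical) chain like the following would
self-deadlock:

	__ptr_to_hashval()
	  spin_lock_irqsave(&filling, flags)
	  get_random_bytes()
	    -> printk("... %p ...")	/* e.g. warning in the RNG, or an NMI */
	      -> ptr_to_id()
	        -> __ptr_to_hashval()
	          -> spin_lock_irqsave(&filling, flags)	/* already held */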

A simple solution would be to use trylock():

		if (!spin_trylock_irqsave(&filling, flags))
			return -EDEADLK;

Could we make this change, please?
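
For reference, the whole critical section with the trylock applied
might then read (untested sketch on top of your patch):

		if (!rng_is_initialized() && !rng_has_arch_random())
			return -EAGAIN;

		if (!spin_trylock_irqsave(&filling, flags))
			return -EDEADLK;

		if (!filled) {
			get_random_bytes(&ptr_key, sizeof(ptr_key));
			queue_work(system_unbound_wq, &enable_ptr_key_work);
			filled = true;
		}
		spin_unlock_irqrestore(&filling, flags);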

I do not mind whether it is done by re-spinning the original patch or
by adding another patch on top of it.

Best Regards,
Petr


> +		if (!filled) {
> +			get_random_bytes(&ptr_key, sizeof(ptr_key));
> +			queue_work(system_unbound_wq, &enable_ptr_key_work);
> +			filled = true;
> +		}
> +		spin_unlock_irqrestore(&filling, flags);
>  	}
>  
> -	return ret;
> -}
> -early_initcall(initialize_ptr_random);
> -
> -/* Maps a pointer to a 32 bit unique identifier. */
> -static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
> -{
> -	unsigned long hashval;
> -
> -	if (static_branch_unlikely(&not_filled_random_ptr_key))
> -		return -EAGAIN;
>  
>  #ifdef CONFIG_64BIT
>  	hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
