Date:	Wed, 23 Sep 2015 12:32:53 +0200
From:	Rasmus Villemoes <linux@...musvillemoes.dk>
To:	Andi Kleen <andi@...stfloor.org>
Cc:	tytso@....edu, linux-kernel@...r.kernel.org,
	kirill.shutemov@...ux.intel.com, herbert@...dor.apana.org.au,
	Andi Kleen <ak@...ux.intel.com>
Subject: Re: [PATCH 1/3] Make /dev/urandom scalable

On Wed, Sep 23 2015, Andi Kleen <andi@...stfloor.org> wrote:

> @@ -467,7 +478,7 @@ static struct entropy_store blocking_pool = {
>  
>  static struct entropy_store nonblocking_pool = {
>  	.poolinfo = &poolinfo_table[1],
> -	.name = "nonblocking",
> +	.name = "nonblocking 0",
>  	.pull = &input_pool,
>  	.lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock),
>  	.pool = nonblocking_pool_data,
> @@ -475,6 +486,32 @@ static struct entropy_store nonblocking_pool = {
>  					push_to_pool),
>  };
>  
> +/*
> + * Per NUMA node nonblocking pool. This avoids lock contention
> + * when many processes extract data from /dev/urandom in parallel.
> + * /dev/random stays single instance for now.
> + */
> +static struct entropy_store **nonblocking_node_pool __read_mostly;
> +
> +#define for_each_nb_pool(i, pool) for (i = 0; i < num_possible_nodes(); i++) { \
> +	pool = nonblocking_node_pool[i];
> +#define end_for_each_nb() }

You can avoid the need for end_for_each_nb (end_for_each_nb_pool?) by
writing the condition

  i < num_possible_nodes() && (pool = nonblocking_node_pool[i], 1)

[If you keep it, please be consistent about whether end_for_each_nb() is
followed by a ; or not, or add do{}while(0) and make the rule that it
always is.]

But more importantly: Won't this generate a function call (ultimately to
bitmap_weight()) for each iteration, at least for large enough
CONFIG_NODES_SHIFT? It's probably not very expensive, but it would be
nice to avoid.

> +static inline struct entropy_store *get_nonblocking_pool(void)
> +{
> +	struct entropy_store *pool = &nonblocking_pool;
> +
> +	/*
> +	 * Non node 0 pools may take longer to initialize. Keep using
> +	 * the boot nonblocking pool while this happens.
> +	 */
> +	if (nonblocking_node_pool)
> +		pool = nonblocking_node_pool[numa_node_id()];
> +	if (!pool->initialized)
> +		pool = &nonblocking_pool;
> +	return pool;
> +}

I assume this can't get called concurrently with rand_initialize
(otherwise pool may be NULL even if nonblocking_node_pool is non-NULL). 
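If it can race, one sketch of a fix (the barrier choice is illustrative,
not something the patch does) would be to build the array in a local
pointer and publish it only once every slot is filled:

  struct entropy_store **pools;

  pools = kcalloc(num_nodes, sizeof(*pools), GFP_KERNEL);
  if (!pools)
          return 0;       /* keep using the static nonblocking_pool */
  pools[0] = &nonblocking_pool;
  /* ... allocate and initialize pools[1..num_nodes-1] as in the patch ... */
  smp_wmb();              /* make the slot stores visible before the pointer */
  nonblocking_node_pool = pools;

(the reader does a dependent load through the pointer, so no pairing
barrier should be needed on most architectures).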

>  
> @@ -1393,9 +1446,32 @@ static void init_std_data(struct entropy_store *r)
>   */
>  static int rand_initialize(void)
>  {
> +	int i;
> +	int num_nodes = num_possible_nodes();
> +	char name[40];
> +
> +	nonblocking_node_pool = kzalloc(num_nodes * sizeof(void *),
> +					GFP_KERNEL|__GFP_NOFAIL);
> +

Why kzalloc, when you immediately initialize all elements? New uses of
__GFP_NOFAIL seem to be frowned upon. How hard would it be to just fall
back to only using the single statically allocated pool?
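Something like this sketch, relying on the NULL check already in
get_nonblocking_pool() (the [0] assignment and the per-node loop below
would have to be guarded the same way):

  nonblocking_node_pool = kmalloc_array(num_nodes, sizeof(void *),
                                        GFP_KERNEL);
  if (!nonblocking_node_pool)
          num_nodes = 0;  /* fall back to the single static pool */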

Does rand_initialize get called before or after other initialization
code updates node_possible_map to reflect the actual possible number of
nodes? If before, won't we be wasting a lot of memory? (Not to mention
that we then might as well allocate all the nonblocking pools statically,
based on MAX_NUMNODES.)
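That static alternative would look roughly like:

  static struct entropy_store *nonblocking_node_pool[MAX_NUMNODES] __read_mostly;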

>  	init_std_data(&input_pool);
>  	init_std_data(&blocking_pool);
> +	nonblocking_node_pool[0] = &nonblocking_pool;
>  	init_std_data(&nonblocking_pool);
> +	for (i = 1; i < num_nodes; i++) {
> +		struct entropy_store *pool = kzalloc(sizeof(struct entropy_store),
> +						     GFP_KERNEL|__GFP_NOFAIL);
> +		nonblocking_node_pool[i] = pool;
> +		pool->poolinfo = &poolinfo_table[1];
> +		pool->pull = &input_pool;
> +		spin_lock_init(&pool->lock);
> +		/* pool data not cleared intentionally */
> +		pool->pool = kmalloc(sizeof(nonblocking_pool_data),
> +				     GFP_KERNEL|__GFP_NOFAIL);
> +		INIT_WORK(&pool->push_work, push_to_pool);
> +		snprintf(name, sizeof name, "nonblocking pool %d", i);
> +		pool->name = kstrdup(name, GFP_KERNEL|__GFP_NOFAIL);

kasprintf() would be simpler here. Also, you renamed the static pool to
"nonblocking 0" above, so the naming is inconsistent with "nonblocking
pool %d".
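That is, something like:

  pool->name = kasprintf(GFP_KERNEL, "nonblocking pool %d", i);

which sizes and allocates the buffer itself, so the name[40] stack
buffer can go away.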


Rasmus
