Message-ID: <20241015224958.64713-1-kuniyu@amazon.com>
Date: Tue, 15 Oct 2024 15:49:58 -0700
From: Kuniyuki Iwashima <kuniyu@...zon.com>
To: <gnaaman@...venets.com>
CC: <davem@...emloft.net>, <edumazet@...gle.com>, <kuba@...nel.org>,
	<kuniyu@...zon.com>, <netdev@...r.kernel.org>, <pabeni@...hat.com>
Subject: Re: [PATCH net-next v4 1/6] Add hlist_node to struct neighbour

From: Gilad Naaman <gnaaman@...venets.com>
Date: Tue, 15 Oct 2024 16:59:21 +0000
> @@ -531,7 +533,9 @@ static void neigh_get_hash_rnd(u32 *x)
>  static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
>  {
>  	size_t size = (1 << shift) * sizeof(struct neighbour *);
> +	size_t hash_heads_size = (1 << shift) * sizeof(struct hlist_head);
>  	struct neigh_hash_table *ret;
> +	struct hlist_head *hash_heads;
>  	struct neighbour __rcu **buckets;
>  	int i;

nit:

While at it, please sort variables in reverse xmas tree order.
Same for other places.

https://docs.kernel.org/process/maintainer-netdev.html#local-variable-ordering-reverse-xmas-tree-rcs
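
i.e. with the declarations in this function it would look something
like this (untested, just to show the ordering, longest line first):

	size_t hash_heads_size = (1 << shift) * sizeof(struct hlist_head);
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets;
	struct hlist_head *hash_heads;
	struct neigh_hash_table *ret;
	int i;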

>  
> @@ -540,17 +544,28 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
>  		return NULL;
>  	if (size <= PAGE_SIZE) {
>  		buckets = kzalloc(size, GFP_ATOMIC);
> +		hash_heads = kzalloc(hash_heads_size, GFP_ATOMIC);
> +		if (!hash_heads)
> +			kfree(buckets);
>  	} else {
>  		buckets = (struct neighbour __rcu **)
>  			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
>  					   get_order(size));
>  		kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
> +
> +		hash_heads = (struct hlist_head *)
> +			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
> +					   get_order(hash_heads_size));
> +		kmemleak_alloc(hash_heads, hash_heads_size, 1, GFP_ATOMIC);
> +		if (!hash_heads)
> +			free_pages((unsigned long)buckets, get_order(size));
>  	}
> -	if (!buckets) {
> +	if (!buckets || !hash_heads) {
>  		kfree(ret);
>  		return NULL;

If buckets is NULL and hash_heads isn't, hash_heads is leaked.
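
Maybe something like this would be cleaner (rough sketch, not tested):
bail out right after each allocation, so the error path only frees
what has actually been allocated.  hash_heads_size is checked
separately below only to keep the two paths symmetric:

	if (size <= PAGE_SIZE) {
		buckets = kzalloc(size, GFP_ATOMIC);
	} else {
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
		kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
	}
	if (!buckets) {
		kfree(ret);
		return NULL;
	}

	if (hash_heads_size <= PAGE_SIZE) {
		hash_heads = kzalloc(hash_heads_size, GFP_ATOMIC);
	} else {
		hash_heads = (struct hlist_head *)
			     __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					      get_order(hash_heads_size));
		kmemleak_alloc(hash_heads, hash_heads_size, 1, GFP_ATOMIC);
	}
	if (!hash_heads) {
		/* undo the buckets allocation before bailing out */
		if (size <= PAGE_SIZE) {
			kfree(buckets);
		} else {
			kmemleak_free(buckets);
			free_pages((unsigned long)buckets, get_order(size));
		}
		kfree(ret);
		return NULL;
	}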


>  	}
>  	ret->hash_buckets = buckets;
> +	ret->hash_heads = hash_heads;
>  	ret->hash_shift = shift;
>  	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
>  		neigh_get_hash_rnd(&ret->hash_rnd[i]);
