Message-ID: <Zyn4uGY7pObMD14u@arm.com>
Date: Tue, 5 Nov 2024 10:51:36 +0000
From: Catalin Marinas <catalin.marinas@....com>
To: Koichiro Den <koichiro.den@...il.com>
Cc: vbabka@...e.cz, cl@...ux.com, penberg@...nel.org, rientjes@...gle.com,
	iamjoonsoo.kim@....com, akpm@...ux-foundation.org,
	roman.gushchin@...ux.dev, 42.hyeyoo@...il.com, kees@...nel.org,
	linux-mm@...ck.org, linux-kernel@...r.kernel.org,
	stable@...r.kernel.org
Subject: Re: [PATCH v2] mm/slab: fix warning caused by duplicate kmem_cache
 creation in kmem_buckets_create

On Tue, Nov 05, 2024 at 11:27:47AM +0900, Koichiro Den wrote:
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 3d26c257ed8b..db6ffe53c23e 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -380,8 +380,11 @@ kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
>  				  unsigned int usersize,
>  				  void (*ctor)(void *))
>  {
> +	unsigned long mask = 0;
> +	unsigned int idx;
>  	kmem_buckets *b;
> -	int idx;
> +
> +	BUILD_BUG_ON(ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]) > BITS_PER_LONG);
>  
>  	/*
>  	 * When the separate buckets API is not built in, just return
> @@ -403,7 +406,7 @@ kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
>  	for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++) {
>  		char *short_size, *cache_name;
>  		unsigned int cache_useroffset, cache_usersize;
> -		unsigned int size;
> +		unsigned int size, aligned_idx;
>  
>  		if (!kmalloc_caches[KMALLOC_NORMAL][idx])
>  			continue;
> @@ -416,10 +419,6 @@ kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
>  		if (WARN_ON(!short_size))
>  			goto fail;
>  
> -		cache_name = kasprintf(GFP_KERNEL, "%s-%s", name, short_size + 1);
> -		if (WARN_ON(!cache_name))
> -			goto fail;
> -
>  		if (useroffset >= size) {
>  			cache_useroffset = 0;
>  			cache_usersize = 0;
> @@ -427,18 +426,29 @@ kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
>  			cache_useroffset = useroffset;
>  			cache_usersize = min(size - cache_useroffset, usersize);
>  		}
> -		(*b)[idx] = kmem_cache_create_usercopy(cache_name, size,
> -					0, flags, cache_useroffset,
> -					cache_usersize, ctor);
> -		kfree(cache_name);
> -		if (WARN_ON(!(*b)[idx]))
> -			goto fail;
> +
> +		aligned_idx = __kmalloc_index(size, false);
> +		if (!(*b)[aligned_idx]) {
> +			cache_name = kasprintf(GFP_KERNEL, "%s-%s", name, short_size + 1);
> +			if (WARN_ON(!cache_name))
> +				goto fail;
> +			(*b)[aligned_idx] = kmem_cache_create_usercopy(cache_name, size,
> +						0, flags, cache_useroffset,
> +						cache_usersize, ctor);
> +			if (WARN_ON(!(*b)[aligned_idx])) {
> +				kfree(cache_name);
> +				goto fail;
> +			}
> +			set_bit(aligned_idx, &mask);
> +		}
> +		if (idx != aligned_idx)
> +			(*b)[idx] = (*b)[aligned_idx];
>  	}

It looks fine. This pretty much matches the logic in new_kmalloc_cache()
(from commit 963e84b0f262).
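
As a side note, here is a quick userspace sketch (not kernel code) of why
several bucket indices collapse onto one cache when the minimum kmalloc
size is 64; kmalloc_index_model() below is a simplified stand-in for
__kmalloc_index() that ignores the 96/192 special cases, and the sizes are
just examples:

#include <stdio.h>

#define MIN_KMALLOC_SIZE 64	/* e.g. a 64-byte minimum alignment */

/* Simplified model: round up to the minimum size, then take log2. */
static unsigned int kmalloc_index_model(unsigned int size)
{
	unsigned int rounded = size < MIN_KMALLOC_SIZE ? MIN_KMALLOC_SIZE : size;
	unsigned int idx = 0;

	while ((1u << idx) < rounded)
		idx++;
	return idx;
}

int main(void)
{
	unsigned int sizes[] = { 8, 16, 32, 64, 128, 256 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %3u -> index %u\n", sizes[i],
		       kmalloc_index_model(sizes[i]));
	return 0;
}

Sizes 8 through 64 all report index 6 here, which is exactly the duplicate
kmem_cache creation that triggered the warning in $SUBJECT and that the
aligned_idx check above avoids.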

>  	return b;
>  
>  fail:
> -	for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++)
> +	for_each_set_bit(idx, &mask, ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]))
>  		kmem_cache_destroy((*b)[idx]);
>  	kmem_cache_free(kmem_buckets_cache, b);
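
FWIW, set_bit() is only called for slots that actually created a cache, so
the for_each_set_bit() walk above skips the aliased entries and destroys
each cache exactly once. A tiny userspace model of that bookkeeping, with a
bare unsigned long and a plain loop standing in for set_bit() and
for_each_set_bit(), and made-up slot numbers and names:

#include <stdio.h>

#define NBUCKETS 14

int main(void)
{
	const char *cache[NBUCKETS] = { NULL };
	unsigned long mask = 0;
	int idx;

	/* Slots 6..8 "create" a cache and record ownership in the mask. */
	cache[6] = "test-64";  mask |= 1UL << 6;
	cache[7] = "test-128"; mask |= 1UL << 7;
	cache[8] = "test-256"; mask |= 1UL << 8;

	/* Smaller slots merely alias slot 6; their bits stay clear. */
	for (idx = 0; idx < 6; idx++)
		cache[idx] = cache[6];

	/* Fail path: destroy only the slots whose bit is set. */
	for (idx = 0; idx < NBUCKETS; idx++)
		if (mask & (1UL << idx))
			printf("destroy %s (slot %d)\n", cache[idx], idx);
	return 0;
}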

I also gave this a try with swiotlb=noforce, which pushes the kmalloc()
minimum alignment to 64. So:

Reviewed-by: Catalin Marinas <catalin.marinas@....com>
Tested-by: Catalin Marinas <catalin.marinas@....com>
