Date:   Wed, 27 Apr 2022 09:50:06 +0200
From:   Vlastimil Babka <vbabka@...e.cz>
To:     Hyeonggon Yoo <42.hyeyoo@...il.com>
Cc:     Marco Elver <elver@...gle.com>,
        Matthew Wilcox <willy@...radead.org>,
        Christoph Lameter <cl@...ux.com>,
        Pekka Enberg <penberg@...nel.org>,
        David Rientjes <rientjes@...gle.com>,
        Joonsoo Kim <iamjoonsoo.kim@....com>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Roman Gushchin <roman.gushchin@...ux.dev>, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org, Joe Perches <joe@...ches.com>
Subject: Re: [PATCH v2 12/23] mm/slab_common: cleanup kmalloc()

On 4/14/22 10:57, Hyeonggon Yoo wrote:
> Now that kmalloc() and kmalloc_node() do the same job, make kmalloc()
> a wrapper of kmalloc_node().
> 
> Remove kmem_cache_alloc_trace() that is now unused.
> 
> Signed-off-by: Hyeonggon Yoo <42.hyeyoo@...il.com>

From a correctness point of view:

Reviewed-by: Vlastimil Babka <vbabka@...e.cz>

But yeah, the impact of requiring the extra NUMA_NO_NODE parameter should be
evaluated. If it's significant, I believe we should still be able to
implement the common kmalloc(), but keep separate kmalloc() and
kmalloc_node() entry points.
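
Roughly something like the following untested sketch (the
__kmalloc_inline() name is made up, not from this series, and it assumes
an out-of-line __kmalloc() stays available next to __kmalloc_node()):
the shared logic lives in one __always_inline helper and the two entry
points only differ in whether a node argument exists at all.

static __always_inline __alloc_size(1)
void *__kmalloc_inline(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size)) {
		unsigned int index;

		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large_node(size, flags, node);

		index = kmalloc_index(size);
		if (!index)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, node, size);
	}

	/*
	 * In the kmalloc() wrapper below, node is the compile-time
	 * constant NUMA_NO_NODE, so this branch folds away and kmalloc()
	 * keeps calling the node-less __kmalloc() entry point.
	 */
	if (__builtin_constant_p(node) && node == NUMA_NO_NODE)
		return __kmalloc(size, flags);

	return __kmalloc_node(size, flags, node);
}

static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
	return __kmalloc_inline(size, flags, NUMA_NO_NODE);
}

static __always_inline __alloc_size(1)
void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc_inline(size, flags, node);
}

That way the common non-NUMA case never has to pass the extra node
argument through to the out-of-line slow path at all.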

> ---
>  include/linux/slab.h | 93 +++++++++++++++-----------------------------
>  mm/slab.c            | 16 --------
>  mm/slub.c            | 12 ------
>  3 files changed, 32 insertions(+), 89 deletions(-)
> 
> diff --git a/include/linux/slab.h b/include/linux/slab.h
> index eb457f20f415..ea168f8a248d 100644
> --- a/include/linux/slab.h
> +++ b/include/linux/slab.h
> @@ -497,23 +497,10 @@ static __always_inline void kfree_bulk(size_t size, void **p)
>  }
>  
>  #ifdef CONFIG_TRACING
> -extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
> -				   __assume_slab_alignment __alloc_size(3);
> -
>  extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
>  					 int node, size_t size) __assume_slab_alignment
>  								__alloc_size(4);
> -
>  #else /* CONFIG_TRACING */
> -static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_cache *s,
> -								    gfp_t flags, size_t size)
> -{
> -	void *ret = kmem_cache_alloc(s, flags);
> -
> -	ret = kasan_kmalloc(s, ret, size, flags);
> -	return ret;
> -}
> -
>  static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
>  							 int node, size_t size)
>  {
> @@ -532,6 +519,37 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
>  	return kmalloc_large_node(size, flags, NUMA_NO_NODE);
>  }
>  
> +#ifndef CONFIG_SLOB
> +static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
> +{
> +	if (__builtin_constant_p(size)) {
> +		unsigned int index;
> +
> +		if (size > KMALLOC_MAX_CACHE_SIZE)
> +			return kmalloc_large_node(size, flags, node);
> +
> +		index = kmalloc_index(size);
> +
> +		if (!index)
> +			return ZERO_SIZE_PTR;
> +
> +		return kmem_cache_alloc_node_trace(
> +				kmalloc_caches[kmalloc_type(flags)][index],
> +						flags, node, size);
> +	}
> +	return __kmalloc_node(size, flags, node);
> +}
> +#else
> +static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
> +{
> +	if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
> +		return kmalloc_large_node(size, flags, node);
> +
> +	return __kmalloc_node(size, flags, node);
> +}
> +#endif
> +
> +
>  /**
>   * kmalloc - allocate memory
>   * @size: how many bytes of memory are required.
> @@ -588,55 +606,8 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
>   */
>  static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
>  {
> -	if (__builtin_constant_p(size)) {
> -#ifndef CONFIG_SLOB
> -		unsigned int index;
> -#endif
> -		if (size > KMALLOC_MAX_CACHE_SIZE)
> -			return kmalloc_large(size, flags);
> -#ifndef CONFIG_SLOB
> -		index = kmalloc_index(size);
> -
> -		if (!index)
> -			return ZERO_SIZE_PTR;
> -
> -		return kmem_cache_alloc_trace(
> -				kmalloc_caches[kmalloc_type(flags)][index],
> -				flags, size);
> -#endif
> -	}
> -	return __kmalloc(size, flags);
> -}
> -
> -#ifndef CONFIG_SLOB
> -static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
> -{
> -	if (__builtin_constant_p(size)) {
> -		unsigned int index;
> -
> -		if (size > KMALLOC_MAX_CACHE_SIZE)
> -			return kmalloc_large_node(size, flags, node);
> -
> -		index = kmalloc_index(size);
> -
> -		if (!index)
> -			return ZERO_SIZE_PTR;
> -
> -		return kmem_cache_alloc_node_trace(
> -				kmalloc_caches[kmalloc_type(flags)][index],
> -						flags, node, size);
> -	}
> -	return __kmalloc_node(size, flags, node);
> -}
> -#else
> -static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
> -{
> -	if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
> -		return kmalloc_large_node(size, flags, node);
> -
> -	return __kmalloc_node(size, flags, node);
> +	return kmalloc_node(size, flags, NUMA_NO_NODE);
>  }
> -#endif
>  
>  /**
>   * kmalloc_array - allocate memory for an array.
> diff --git a/mm/slab.c b/mm/slab.c
> index c5ffe54c207a..b0aaca017f42 100644
> --- a/mm/slab.c
> +++ b/mm/slab.c
> @@ -3507,22 +3507,6 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
>  }
>  EXPORT_SYMBOL(kmem_cache_alloc_bulk);
>  
> -#ifdef CONFIG_TRACING
> -void *
> -kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
> -{
> -	void *ret;
> -
> -	ret = slab_alloc(cachep, NULL, flags, size, _RET_IP_);
> -
> -	ret = kasan_kmalloc(cachep, ret, size, flags);
> -	trace_kmalloc(_RET_IP_, ret,
> -		      size, cachep->size, flags);
> -	return ret;
> -}
> -EXPORT_SYMBOL(kmem_cache_alloc_trace);
> -#endif
> -
>  #ifdef CONFIG_TRACING
>  void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
>  				  gfp_t flags,
> diff --git a/mm/slub.c b/mm/slub.c
> index 2a2be2a8a5d0..892988990da7 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -3216,18 +3216,6 @@ static __always_inline void *slab_alloc(struct kmem_cache *s, struct list_lru *l
>  	return slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, addr, orig_size);
>  }
>  
> -
> -#ifdef CONFIG_TRACING
> -void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
> -{
> -	void *ret = slab_alloc(s, NULL, gfpflags, _RET_IP_, size);
> -	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
> -	ret = kasan_kmalloc(s, ret, size, gfpflags);
> -	return ret;
> -}
> -EXPORT_SYMBOL(kmem_cache_alloc_trace);
> -#endif
> -
>  void *__kmem_cache_alloc_node(struct kmem_cache *s, struct list_lru *lru, gfp_t gfpflags,
>  			      int node, unsigned long caller __maybe_unused)
>  {
