Date:   Fri, 29 Apr 2022 16:48:56 +0200
From:   Vlastimil Babka <vbabka@...e.cz>
To:     Hyeonggon Yoo <42.hyeyoo@...il.com>
Cc:     Marco Elver <elver@...gle.com>,
        Matthew Wilcox <willy@...radead.org>,
        Christoph Lameter <cl@...ux.com>,
        Pekka Enberg <penberg@...nel.org>,
        David Rientjes <rientjes@...gle.com>,
        Joonsoo Kim <iamjoonsoo.kim@....com>,
        Andrew Morton <akpm@...ux-foundation.org>,
        Roman Gushchin <roman.gushchin@...ux.dev>, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2 20/23] mm/slab_common: factor out __do_kmalloc_node()

On 4/14/22 10:57, Hyeonggon Yoo wrote:
> Factor out common code into __do_kmalloc_node().
> 
> Signed-off-by: Hyeonggon Yoo <42.hyeyoo@...il.com>

Looks good, but let's see how things look after the changes to the previous
patches.

> ---
>  mm/slab_common.c | 27 ++++++++++-----------------
>  1 file changed, 10 insertions(+), 17 deletions(-)
> 
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index 6abe7f61c197..af563e64e8aa 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -919,7 +919,9 @@ void free_large_kmalloc(struct folio *folio, void *object)
>  	__free_pages(folio_page(folio, 0), order);
>  }
>  
> -void *__kmalloc_node(size_t size, gfp_t flags, int node)
> +static __always_inline
> +void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
> +			unsigned long caller __maybe_unused)
>  {
>  	struct kmem_cache *s;
>  	void *ret;
> @@ -932,31 +934,22 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
>  	if (unlikely(ZERO_OR_NULL_PTR(s)))
>  		return s;
>  
> -	ret = __kmem_cache_alloc_node(s, NULL, flags, node, _RET_IP_);
> +	ret = __kmem_cache_alloc_node(s, NULL, flags, node, caller);
>  	ret = kasan_kmalloc(s, ret, size, flags);
>  
>  	return ret;
>  }
> +
> +void *__kmalloc_node(size_t size, gfp_t flags, int node)
> +{
> +	return __do_kmalloc_node(size, flags, node, _RET_IP_);
> +}
>  EXPORT_SYMBOL(__kmalloc_node);
>  
>  void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
>  					int node, unsigned long caller)
>  {
> -	struct kmem_cache *s;
> -	void *ret;
> -
> -	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
> -		return kmalloc_large_node(size, gfpflags, node);
> -
> -	s = kmalloc_slab(size, gfpflags);
> -
> -	if (unlikely(ZERO_OR_NULL_PTR(s)))
> -		return s;
> -
> -	ret = __kmem_cache_alloc_node(s, NULL, gfpflags, node, caller);
> -	ret = kasan_kmalloc(s, ret, size, gfpflags);
> -
> -	return ret;
> +	return __do_kmalloc_node(size, gfpflags, node, caller);
>  }
>  EXPORT_SYMBOL(__kmalloc_node_track_caller);
>  
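
As an aside, the shape of the refactoring is easy to see in a standalone
sketch: one always-inline helper takes an explicit caller address, and the
thin wrappers either capture their own return address (as __kmalloc_node()
does with _RET_IP_) or forward one supplied by the caller (as
__kmalloc_node_track_caller() does). The sketch below is plain userspace C
with made-up names (RET_IP, do_alloc, my_alloc, my_alloc_track_caller); it
only illustrates the pattern, not the kernel code itself.

#include <stdio.h>
#include <stdlib.h>

/* illustrative stand-in for the kernel's _RET_IP_ */
#define RET_IP	((unsigned long)__builtin_return_address(0))

/* common path, parameterized by the caller address to report */
static inline __attribute__((always_inline))
void *do_alloc(size_t size, unsigned long caller)
{
	void *p = malloc(size);

	printf("allocated %zu bytes for caller %#lx\n", size, caller);
	return p;
}

/* records its own caller, like __kmalloc_node() passing _RET_IP_ */
void *my_alloc(size_t size)
{
	return do_alloc(size, RET_IP);
}

/* forwards a caller address supplied from outside, like
 * __kmalloc_node_track_caller() */
void *my_alloc_track_caller(size_t size, unsigned long caller)
{
	return do_alloc(size, caller);
}

int main(void)
{
	void *a = my_alloc(32);
	void *b = my_alloc_track_caller(64, RET_IP);

	free(a);
	free(b);
	return 0;
}

Marking the helper always-inline keeps both wrappers as cheap as the
previously open-coded versions, since the compiler folds the common body
back into each entry point.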
