Message-ID: <4c5852cc-2ca5-432b-8426-01c108df66cb@suse.cz>
Date: Tue, 8 Jul 2025 14:52:45 +0200
From: Vlastimil Babka <vbabka@...e.cz>
To: Vitaly Wool <vitaly.wool@...sulko.se>, linux-mm@...ck.org
Cc: akpm@...ux-foundation.org, linux-kernel@...r.kernel.org,
 Uladzislau Rezki <urezki@...il.com>, Danilo Krummrich <dakr@...nel.org>,
 Alice Ryhl <aliceryhl@...gle.com>, rust-for-linux@...r.kernel.org,
 Hyeonggon Yoo <42.hyeyoo@...il.com>,
 Roman Gushchin <roman.gushchin@...ux.dev>,
 Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
 "Liam R. Howlett" <Liam.Howlett@...cle.com>
Subject: Re: [PATCH v11 2/4] mm/slub: allow to set node and align in
 k[v]realloc

On 7/7/25 18:49, Vitaly Wool wrote:
> Reimplement k[v]realloc_node() to be able to set node and
> alignment should a user need to do so. In order to do that while
> retaining maximum backward compatibility, add
> k[v]realloc_node_align() functions and redefine the rest of the
> API using these new ones.
> 
> With that change we also provide the ability for the Rust part of
> the kernel to set node and alignment in its K[v]xxx
> [re]allocations.
> 
> Signed-off-by: Vitaly Wool <vitaly.wool@...sulko.se>
> ---
>  include/linux/slab.h | 40 ++++++++++++++++++---------
>  mm/slub.c            | 64 ++++++++++++++++++++++++++++++--------------
>  2 files changed, 71 insertions(+), 33 deletions(-)
> 
> diff --git a/include/linux/slab.h b/include/linux/slab.h
> index d5a8ab98035c..13abcf4ada22 100644
> --- a/include/linux/slab.h
> +++ b/include/linux/slab.h
> @@ -465,9 +465,15 @@ int kmem_cache_shrink(struct kmem_cache *s);
>  /*
>   * Common kmalloc functions provided by all allocators
>   */
> -void * __must_check krealloc_noprof(const void *objp, size_t new_size,
> -				    gfp_t flags) __realloc_size(2);
> -#define krealloc(...)				alloc_hooks(krealloc_noprof(__VA_ARGS__))
> +void * __must_check krealloc_node_align_noprof(const void *objp, size_t new_size,
> +					       unsigned long align,
> +					       gfp_t flags, int nid) __realloc_size(2);
> +#define krealloc_node_noprof(_p, _s, _f, _n) \
> +	krealloc_node_align_noprof(_p, _s, 1, _f, _n)
> +#define krealloc_noprof(...)		krealloc_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
> +#define krealloc_node_align(...)	alloc_hooks(krealloc_node_align_noprof(__VA_ARGS__))
> +#define krealloc_node(...)		alloc_hooks(krealloc_node_noprof(__VA_ARGS__))
> +#define krealloc(...)			alloc_hooks(krealloc_noprof(__VA_ARGS__))

Hm, I wonder if krealloc() and krealloc_node_align() would be enough. Is
krealloc_node() only used between patches 3 and 4?
Also, perhaps it would be more concise to only have
krealloc_node_align_noprof(), with the alloc_hooks wrappers filling in
NUMA_NO_NODE (and 1), so we don't need to #define a _noprof variant of
everything. The _noprof callers are rare, so they can always use
krealloc_node_align_noprof() directly and fill in NUMA_NO_NODE (and 1)
themselves.
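Something like this (a rough sketch only, untested, reusing the
declaration from this patch):

void * __must_check krealloc_node_align_noprof(const void *objp, size_t new_size,
					       unsigned long align,
					       gfp_t flags, int nid) __realloc_size(2);
#define krealloc_node_align(...)	alloc_hooks(krealloc_node_align_noprof(__VA_ARGS__))
/* the short form fills in the defaults (align 1, NUMA_NO_NODE) itself */
#define krealloc(_p, _size, _flags)	\
	krealloc_node_align(_p, _size, 1, _flags, NUMA_NO_NODE)

and the rare _noprof caller would then spell out
krealloc_node_align_noprof(p, size, 1, flags, NUMA_NO_NODE) in full.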

>  void kfree(const void *objp);
>  void kfree_sensitive(const void *objp);
> @@ -1041,18 +1047,23 @@ static inline __alloc_size(1) void *kzalloc_noprof(size_t size, gfp_t flags)
>  #define kzalloc(...)				alloc_hooks(kzalloc_noprof(__VA_ARGS__))
>  #define kzalloc_node(_size, _flags, _node)	kmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
>  
> -void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node) __alloc_size(1);
> -#define kvmalloc_node_noprof(size, flags, node)	\
> -	__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node)
> +void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), unsigned long align,
> +			     gfp_t flags, int node) __alloc_size(1);
> +#define kvmalloc_node_align_noprof(_size, _align, _flags, _node)	\
> +	__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, NULL), _align, _flags, _node)
> +#define kvmalloc_node_noprof(_size, _flags, _node)	\
> +	kvmalloc_node_align_noprof(_size, 1, _flags, _node)
> +#define kvmalloc_node_align(...)		\
> +	alloc_hooks(kvmalloc_node_align_noprof(__VA_ARGS__))
>  #define kvmalloc_node(...)			alloc_hooks(kvmalloc_node_noprof(__VA_ARGS__))

Ditto.

>  
> -#define kvmalloc(_size, _flags)			kvmalloc_node(_size, _flags, NUMA_NO_NODE)
> -#define kvmalloc_noprof(_size, _flags)		kvmalloc_node_noprof(_size, _flags, NUMA_NO_NODE)
> +#define kvmalloc_noprof(...)			kvmalloc_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
> +#define kvmalloc(...)				alloc_hooks(kvmalloc_noprof(__VA_ARGS__))
>  #define kvzalloc(_size, _flags)			kvmalloc(_size, (_flags)|__GFP_ZERO)
>  
> -#define kvzalloc_node(_size, _flags, _node)	kvmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
> +#define kvzalloc_node(_s, _f, _n)		kvmalloc_node(_s, (_f)|__GFP_ZERO, _n)
>  #define kmem_buckets_valloc(_b, _size, _flags)	\
> -	alloc_hooks(__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))
> +	alloc_hooks(__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), 1, _flags, NUMA_NO_NODE))
>  
>  static inline __alloc_size(1, 2) void *
>  kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
> @@ -1068,13 +1079,16 @@ kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
>  #define kvmalloc_array_noprof(...)		kvmalloc_array_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
>  #define kvcalloc_node_noprof(_n,_s,_f,_node)	kvmalloc_array_node_noprof(_n,_s,(_f)|__GFP_ZERO,_node)
>  #define kvcalloc_noprof(...)			kvcalloc_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
> -
>  #define kvmalloc_array(...)			alloc_hooks(kvmalloc_array_noprof(__VA_ARGS__))
>  #define kvcalloc_node(...)			alloc_hooks(kvcalloc_node_noprof(__VA_ARGS__))
>  #define kvcalloc(...)				alloc_hooks(kvcalloc_noprof(__VA_ARGS__))
>  
> -void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
> -		__realloc_size(2);
> +void *kvrealloc_node_align_noprof(const void *p, size_t size, unsigned long align,
> +				  gfp_t flags, int nid) __realloc_size(2);
> +#define kvrealloc_node_align(...)		kvrealloc_node_align_noprof(__VA_ARGS__)
> +#define kvrealloc_node_noprof(_p, _s, _f, _n)	kvrealloc_node_align_noprof(_p, _s, 1, _f, _n)
> +#define kvrealloc_node(...)			alloc_hooks(kvrealloc_node_noprof(__VA_ARGS__))
> +#define kvrealloc_noprof(...)			kvrealloc_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
>  #define kvrealloc(...)				alloc_hooks(kvrealloc_noprof(__VA_ARGS__))

Ditto.

>  extern void kvfree(const void *addr);
> diff --git a/mm/slub.c b/mm/slub.c
> index c4b64821e680..881244c357dd 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -4845,7 +4845,7 @@ void kfree(const void *object)
>  EXPORT_SYMBOL(kfree);
>  
>  static __always_inline __realloc_size(2) void *
> -__do_krealloc(const void *p, size_t new_size, gfp_t flags)
> +__do_krealloc(const void *p, size_t new_size, unsigned long align, gfp_t flags, int nid)
>  {
>  	void *ret;
>  	size_t ks = 0;
> @@ -4859,6 +4859,20 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
>  	if (!kasan_check_byte(p))
>  		return NULL;
>  
> +	/* refuse to proceed if alignment is bigger than what kmalloc() provides */
> +	if (!IS_ALIGNED((unsigned long)p, align) || new_size < align)
> +		return NULL;
> +
> +	/*
> +	 * If reallocation is not necessary (e.g. the new size is less
> +	 * than the current allocated size), the current allocation will be
> +	 * preserved unless __GFP_THISNODE is set. In the latter case a new
> +	 * allocation on the requested node will be attempted.
> +	 */
> +	if (unlikely(flags & __GFP_THISNODE) && nid != NUMA_NO_NODE &&
> +		     nid != page_to_nid(vmalloc_to_page(p)))

We need virt_to_page() here, not vmalloc_to_page(). In __do_krealloc()
p is a kmalloc()/slab address, not a vmalloc one.
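I.e. something like (untested):

	if (unlikely(flags & __GFP_THISNODE) && nid != NUMA_NO_NODE &&
	    nid != page_to_nid(virt_to_page(p)))
		goto alloc_new;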

> +		goto alloc_new;
> +
>  	if (is_kfence_address(p)) {
>  		ks = orig_size = kfence_ksize(p);
>  	} else {
