Message-Id: <A2FBF97F-5122-4C06-A1EC-F997E01A8672@konsulko.se>
Date: Fri, 27 Jun 2025 20:10:44 +0200
From: Vitaly Wool <vitaly.wool@...sulko.se>
To: Tamir Duberstein <tamird@...il.com>
Cc: linux-mm@...ck.org,
akpm@...ux-foundation.org,
linux-kernel@...r.kernel.org,
Uladzislau Rezki <urezki@...il.com>,
Danilo Krummrich <dakr@...nel.org>,
Alice Ryhl <aliceryhl@...gle.com>,
rust-for-linux@...r.kernel.org
Subject: Re: [PATCH v4 2/4] mm/slub: allow to set node and align in
k[v]realloc
> On Jun 26, 2025, at 10:53 PM, Tamir Duberstein <tamird@...il.com> wrote:
>
> On Thu, Jun 26, 2025 at 1:39 AM Vitaly Wool <vitaly.wool@...sulko.se> wrote:
>>
>> Reimplement k[v]realloc() to be able to set node and alignment
>> should a user need to do so. Rename the respective functions to
>> k[v]realloc_node() to better match what they actually do now and
>> introduce macros for k[v]realloc() for backward compatibility.
>>
>> With that change we also provide the ability for the Rust part of
>> the kernel to set node and alignment in its K[v]xxx [re]allocations.
>>
>> Signed-off-by: Vitaly Wool <vitaly.wool@...sulko.se>
>
> Hi Vitaly, there is a typo in the subject line: it should be slab, not slub.
Thanks, corrected.
>
>> ---
>> include/linux/slab.h | 12 ++++++++----
>> mm/slub.c | 33 ++++++++++++++++++++++-----------
>> 2 files changed, 30 insertions(+), 15 deletions(-)
>>
>> diff --git a/include/linux/slab.h b/include/linux/slab.h
>> index d5a8ab98035c..119f100978c8 100644
>> --- a/include/linux/slab.h
>> +++ b/include/linux/slab.h
>> @@ -465,9 +465,11 @@ int kmem_cache_shrink(struct kmem_cache *s);
>> /*
>> * Common kmalloc functions provided by all allocators
>> */
>> -void * __must_check krealloc_noprof(const void *objp, size_t new_size,
>> - gfp_t flags) __realloc_size(2);
>> -#define krealloc(...) alloc_hooks(krealloc_noprof(__VA_ARGS__))
>> +void * __must_check krealloc_node_noprof(const void *objp, size_t new_size,
>> + gfp_t flags, int nid) __realloc_size(2);
>> +#define krealloc_node(...) alloc_hooks(krealloc_node_noprof(__VA_ARGS__))
>> +#define krealloc_noprof(o, s, f) krealloc_node_noprof(o, s, f, NUMA_NO_NODE)
>> +#define krealloc(...) alloc_hooks(krealloc_noprof(__VA_ARGS__))
>>
>> void kfree(const void *objp);
>> void kfree_sensitive(const void *objp);
>> @@ -1073,8 +1075,10 @@ kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
>> #define kvcalloc_node(...) alloc_hooks(kvcalloc_node_noprof(__VA_ARGS__))
>> #define kvcalloc(...) alloc_hooks(kvcalloc_noprof(__VA_ARGS__))
>>
>> -void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
>> +void *kvrealloc_node_noprof(const void *p, size_t size, gfp_t flags, int nid)
>> __realloc_size(2);
>> +#define kvrealloc_node(...) alloc_hooks(kvrealloc_node_noprof(__VA_ARGS__))
>> +#define kvrealloc_noprof(p, s, f) kvrealloc_node_noprof(p, s, f, NUMA_NO_NODE)
>> #define kvrealloc(...) alloc_hooks(kvrealloc_noprof(__VA_ARGS__))
>>
>> extern void kvfree(const void *addr);
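
For reference, a minimal caller-side sketch of how the renamed helpers and
the compatibility macros above fit together (the function and variable names
here are illustrative only, not part of the patch):

	#include <linux/slab.h>

	/* placement-aware reallocation via the new helper */
	static void *grow_on_node(void *buf, size_t new_len, int nid)
	{
		return krealloc_node(buf, new_len, GFP_KERNEL, nid);
	}

	/*
	 * Existing callers are unchanged: krealloc()/kvrealloc() now expand
	 * to the *_node_noprof() variants with NUMA_NO_NODE.
	 */
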
>> diff --git a/mm/slub.c b/mm/slub.c
>> index c4b64821e680..2d5150d075d5 100644
>> --- a/mm/slub.c
>> +++ b/mm/slub.c
>> @@ -4845,7 +4845,7 @@ void kfree(const void *object)
>> EXPORT_SYMBOL(kfree);
>>
>> static __always_inline __realloc_size(2) void *
>> -__do_krealloc(const void *p, size_t new_size, gfp_t flags)
>> +__do_krealloc(const void *p, size_t new_size, gfp_t flags, int nid)
>> {
>> void *ret;
>> size_t ks = 0;
>> @@ -4859,6 +4859,15 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
>> if (!kasan_check_byte(p))
>> return NULL;
>>
>> + /*
>> + * it is possible to support reallocation with a different nid, but
>> + * it doesn't go well with the concept of krealloc(). Such
>> + * reallocation should be done explicitly instead.
>> + */
>> + if (WARN(nid != NUMA_NO_NODE && nid != page_to_nid(virt_to_page(p)),
>> + "krealloc() has mismatched nids\n"))
>> + return NULL;
>> +
>> if (is_kfence_address(p)) {
>> ks = orig_size = kfence_ksize(p);
>> } else {
>> @@ -4903,7 +4912,7 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
>> return (void *)p;
>>
>> alloc_new:
>> - ret = kmalloc_node_track_caller_noprof(new_size, flags, NUMA_NO_NODE, _RET_IP_);
>> + ret = kmalloc_node_track_caller_noprof(new_size, flags, nid, _RET_IP_);
>> if (ret && p) {
>> /* Disable KASAN checks as the object's redzone is accessed. */
>> kasan_disable_current();
>> @@ -4919,6 +4928,7 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
>> * @p: object to reallocate memory for.
>> * @new_size: how many bytes of memory are required.
>> * @flags: the type of memory to allocate.
>> + * @nid: NUMA node or NUMA_NO_NODE
>> *
>> * If @p is %NULL, krealloc() behaves exactly like kmalloc(). If @new_size
>> * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
>> @@ -4947,7 +4957,7 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
>> *
>> * Return: pointer to the allocated memory or %NULL in case of error
>> */
>> -void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
>> +void *krealloc_node_noprof(const void *p, size_t new_size, gfp_t flags, int nid)
>> {
>> void *ret;
>>
>> @@ -4956,13 +4966,13 @@ void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
>> return ZERO_SIZE_PTR;
>> }
>>
>> - ret = __do_krealloc(p, new_size, flags);
>> + ret = __do_krealloc(p, new_size, flags, nid);
>> if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
>> kfree(p);
>>
>> return ret;
>> }
>> -EXPORT_SYMBOL(krealloc_noprof);
>> +EXPORT_SYMBOL(krealloc_node_noprof);
>>
>> static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
>> {
>> @@ -5079,10 +5089,11 @@ void kvfree_sensitive(const void *addr, size_t len)
>> EXPORT_SYMBOL(kvfree_sensitive);
>>
>> /**
>> - * kvrealloc - reallocate memory; contents remain unchanged
>> + * kvrealloc_node - reallocate memory; contents remain unchanged
>> * @p: object to reallocate memory for
>> * @size: the size to reallocate
>> * @flags: the flags for the page level allocator
>> + * @nid: NUMA node id
>> *
>> * If @p is %NULL, kvrealloc() behaves exactly like kvmalloc(). If @size is 0
>> * and @p is not a %NULL pointer, the object pointed to is freed.
>> @@ -5100,17 +5111,17 @@ EXPORT_SYMBOL(kvfree_sensitive);
>> *
>> * Return: pointer to the allocated memory or %NULL in case of error
>> */
>> -void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
>> +void *kvrealloc_node_noprof(const void *p, size_t size, gfp_t flags, int nid)
>> {
>> void *n;
>>
>> if (is_vmalloc_addr(p))
>> - return vrealloc_noprof(p, size, flags);
>> + return vrealloc_node_noprof(p, size, 1, flags, nid);
>>
>> - n = krealloc_noprof(p, size, kmalloc_gfp_adjust(flags, size));
>> + n = krealloc_node_noprof(p, size, kmalloc_gfp_adjust(flags, size), nid);
>> if (!n) {
>> /* We failed to krealloc(), fall back to kvmalloc(). */
>> - n = kvmalloc_noprof(size, flags);
>> + n = kvmalloc_node_noprof(size, flags, nid);
>> if (!n)
>> return NULL;
>>
>> @@ -5126,7 +5137,7 @@ void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
>>
>> return n;
>> }
>> -EXPORT_SYMBOL(kvrealloc_noprof);
>> +EXPORT_SYMBOL(kvrealloc_node_noprof);
>>
>> struct detached_freelist {
>> struct slab *slab;
>> --
>> 2.39.2
>>
>>
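As an illustration of the nid handling added in __do_krealloc() above:
reallocating against a node other than the one backing the existing object
is treated as a caller error rather than a migration request (sketch only,
sizes and node ids are made up):

	/* object initially allocated on node 0 */
	void *p = kmalloc_node(64, GFP_KERNEL, 0);

	/* fine: NUMA_NO_NODE (or the matching node) behaves like krealloc() */
	p = krealloc_node(p, 128, GFP_KERNEL, NUMA_NO_NODE);

	/*
	 * Triggers the WARN() and returns NULL: a cross-node move has to be
	 * done explicitly with alloc + copy + free instead.
	 */
	void *q = krealloc_node(p, 128, GFP_KERNEL, 1);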