[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <0de460d4-b0c6-4d41-bb9e-72af20cf0777@amd.com>
Date: Mon, 21 Apr 2025 10:17:38 +0530
From: Shivank Garg <shivankg@....com>
To: Baoquan He <bhe@...hat.com>, linux-mm@...ck.org
Cc: akpm@...ux-foundation.org, urezki@...il.com, vishal.moola@...il.com,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2 5/5] mm/vmalloc.c: return explicit error value in
alloc_vmap_area()
On 4/19/2025 4:06 AM, Baoquan He wrote:
> In the code of alloc_vmap_area(), the upper bound 'vend' is returned to
> indicate whether the allocation succeeded or failed. That is not very clear.
>
> Change it to return explicit error values instead, and check those to
> determine whether the allocation succeeded.
>
> Signed-off-by: Baoquan He <bhe@...hat.com>
> ---
> mm/vmalloc.c | 27 +++++++++++++--------------
> 1 file changed, 13 insertions(+), 14 deletions(-)
>
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 39e043ba969b..0251402ca5b9 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -1698,7 +1698,7 @@ va_clip(struct rb_root *root, struct list_head *head,
> */
> lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
> if (!lva)
> - return -1;
> + return -ENOMEM;
> }
>
> /*
> @@ -1712,7 +1712,7 @@ va_clip(struct rb_root *root, struct list_head *head,
> */
> va->va_start = nva_start_addr + size;
> } else {
> - return -1;
> + return -EINVAL;
> }
>
> if (type != FL_FIT_TYPE) {
> @@ -1741,19 +1741,19 @@ va_alloc(struct vmap_area *va,
>
> /* Check the "vend" restriction. */
> if (nva_start_addr + size > vend)
> - return vend;
> + return -ERANGE;
>
> /* Update the free vmap_area. */
> ret = va_clip(root, head, va, nva_start_addr, size);
> if (WARN_ON_ONCE(ret))
> - return vend;
> + return ret;
>
> return nva_start_addr;
> }
>
> /*
> * Returns a start address of the newly allocated area, if success.
> - * Otherwise a vend is returned that indicates failure.
> + * Otherwise an error value is returned that indicates failure.
> */
> static __always_inline unsigned long
> __alloc_vmap_area(struct rb_root *root, struct list_head *head,
> @@ -1778,14 +1778,13 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
>
> va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
> if (unlikely(!va))
> - return vend;
> + return -ENOENT;
>
> nva_start_addr = va_alloc(va, root, head, size, align, vstart, vend);
> - if (nva_start_addr == vend)
> - return vend;
>
> #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
> - find_vmap_lowest_match_check(root, head, size, align);
> + if (!IS_ERR_VALUE(nva_start_addr))
> + find_vmap_lowest_match_check(root, head, size, align);
> #endif
>
> return nva_start_addr;
> @@ -1915,7 +1914,7 @@ node_alloc(unsigned long size, unsigned long align,
> struct vmap_area *va;
>
> *vn_id = 0;
> - *addr = vend;
> + *addr = -EINVAL;
>
> /*
> * Fallback to a global heap if not vmalloc or there
> @@ -1995,20 +1994,20 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
> }
>
> retry:
> - if (addr == vend) {
> + if (IS_ERR_VALUE(addr)) {
> preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
> addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
> size, align, vstart, vend);
> spin_unlock(&free_vmap_area_lock);
> }
>
> - trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);
> + trace_alloc_vmap_area(addr, size, align, vstart, vend, IS_ERR_VALUE(addr));
>
> /*
> - * If an allocation fails, the "vend" address is
> + * If an allocation fails, the error value is
> * returned. Therefore trigger the overflow path.
> */
> - if (unlikely(addr == vend))
> + if (IS_ERR_VALUE(addr))
> goto overflow;
>
> va->va_start = addr;
Reviewed-by: Shivank Garg <shivankg@....com>
Thanks,
Shivank
Powered by blists - more mailing lists