Message-ID: <YqAbmXX7/Tujtekg@MiWiFi-R3L-srv>
Date: Wed, 8 Jun 2022 11:46:33 +0800
From: Baoquan He <bhe@...hat.com>
To: "Uladzislau Rezki (Sony)" <urezki@...il.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>, linux-mm@...ck.org,
LKML <linux-kernel@...r.kernel.org>,
Christoph Hellwig <hch@...radead.org>,
Matthew Wilcox <willy@...radead.org>,
Nicholas Piggin <npiggin@...il.com>,
Oleksiy Avramchenko <oleksiy.avramchenko@...y.com>
Subject: Re: [PATCH 2/5] mm/vmalloc: Extend __alloc_vmap_area() with extra
arguments
On 06/07/22 at 11:34am, Uladzislau Rezki (Sony) wrote:
> __alloc_vmap_area() currently allocates only from the global
> vmap space, so the list head and rb-tree that represent the
> free vmap space are not passed to it as parameters but are
> accessed directly from within the function.
>
> Extend __alloc_vmap_area() and its dependent functions so they
> can allocate from different trees, making the interface generic
> rather than tied to the global state.
>
> There is no functional change as a result of this patch.
LGTM,
Reviewed-by: Baoquan He <bhe@...hat.com>
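
One illustrative aside: with the extended signature a caller could in
principle feed its own free-space pair instead of the global
free_vmap_area_root/free_vmap_area_list. A minimal sketch (the
my_free_root/my_free_list names are hypothetical and not part of this
patch, and the caller is assumed to hold whatever lock protects that
tree, as free_vmap_area_lock does for the global one):

	static struct rb_root my_free_root = RB_ROOT;
	static LIST_HEAD(my_free_list);

	unsigned long addr;

	/* Allocate from the private tree; vend is returned on failure. */
	addr = __alloc_vmap_area(&my_free_root, &my_free_list,
				 size, align, vstart, vend);
	if (addr == vend)
		/* handle allocation failure */;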
>
> Signed-off-by: Uladzislau Rezki (Sony) <urezki@...il.com>
> ---
> mm/vmalloc.c | 30 ++++++++++++++++--------------
> 1 file changed, 16 insertions(+), 14 deletions(-)
>
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 0102d6d5fcdf..745e89eb6ca1 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -1234,15 +1234,15 @@ is_within_this_va(struct vmap_area *va, unsigned long size,
> * overhead.
> */
> static __always_inline struct vmap_area *
> -find_vmap_lowest_match(unsigned long size, unsigned long align,
> - unsigned long vstart, bool adjust_search_size)
> +find_vmap_lowest_match(struct rb_root *root, unsigned long size,
> + unsigned long align, unsigned long vstart, bool adjust_search_size)
> {
> struct vmap_area *va;
> struct rb_node *node;
> unsigned long length;
>
> /* Start from the root. */
> - node = free_vmap_area_root.rb_node;
> + node = root->rb_node;
>
> /* Adjust the search size for alignment overhead. */
> length = adjust_search_size ? size + align - 1 : size;
> @@ -1370,9 +1370,9 @@ classify_va_fit_type(struct vmap_area *va,
> }
>
> static __always_inline int
> -adjust_va_to_fit_type(struct vmap_area *va,
> - unsigned long nva_start_addr, unsigned long size,
> - enum fit_type type)
> +adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
> + struct vmap_area *va, unsigned long nva_start_addr,
> + unsigned long size, enum fit_type type)
> {
> struct vmap_area *lva = NULL;
>
> @@ -1384,7 +1384,7 @@ adjust_va_to_fit_type(struct vmap_area *va,
> * V NVA V
> * |---------------|
> */
> - unlink_va_augment(va, &free_vmap_area_root);
> + unlink_va_augment(va, root);
> kmem_cache_free(vmap_area_cachep, va);
> } else if (type == LE_FIT_TYPE) {
> /*
> @@ -1462,8 +1462,7 @@ adjust_va_to_fit_type(struct vmap_area *va,
> augment_tree_propagate_from(va);
>
> if (lva) /* type == NE_FIT_TYPE */
> - insert_vmap_area_augment(lva, &va->rb_node,
> - &free_vmap_area_root, &free_vmap_area_list);
> + insert_vmap_area_augment(lva, &va->rb_node, root, head);
> }
>
> return 0;
> @@ -1474,7 +1473,8 @@ adjust_va_to_fit_type(struct vmap_area *va,
> * Otherwise a vend is returned that indicates failure.
> */
> static __always_inline unsigned long
> -__alloc_vmap_area(unsigned long size, unsigned long align,
> +__alloc_vmap_area(struct rb_root *root, struct list_head *head,
> + unsigned long size, unsigned long align,
> unsigned long vstart, unsigned long vend)
> {
> bool adjust_search_size = true;
> @@ -1495,7 +1495,7 @@ __alloc_vmap_area(unsigned long size, unsigned long align,
> if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
> adjust_search_size = false;
>
> - va = find_vmap_lowest_match(size, align, vstart, adjust_search_size);
> + va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
> if (unlikely(!va))
> return vend;
>
> @@ -1514,7 +1514,7 @@ __alloc_vmap_area(unsigned long size, unsigned long align,
> return vend;
>
> /* Update the free vmap_area. */
> - ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
> + ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size, type);
> if (ret)
> return vend;
>
> @@ -1605,7 +1605,8 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
>
> retry:
> preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
> - addr = __alloc_vmap_area(size, align, vstart, vend);
> + addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
> + size, align, vstart, vend);
> spin_unlock(&free_vmap_area_lock);
>
> /*
> @@ -3886,7 +3887,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
> /* It is a BUG(), but trigger recovery instead. */
> goto recovery;
>
> - ret = adjust_va_to_fit_type(va, start, size, type);
> + ret = adjust_va_to_fit_type(&free_vmap_area_root,
> + &free_vmap_area_list, va, start, size, type);
> if (unlikely(ret))
> goto recovery;
>
> --
> 2.30.2
>
>