Message-ID: <aUkv51yk8lgzW9jv@milan>
Date: Mon, 22 Dec 2025 12:47:51 +0100
From: Uladzislau Rezki <urezki@...il.com>
To: Dev Jain <dev.jain@....com>
Cc: catalin.marinas@....com, will@...nel.org, urezki@...il.com,
akpm@...ux-foundation.org, tytso@....edu, adilger.kernel@...ger.ca,
cem@...nel.org, ryan.roberts@....com, anshuman.khandual@....com,
shijie@...amperecomputing.com, yang@...amperecomputing.com,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
linux-mm@...ck.org, npiggin@...il.com, willy@...radead.org,
david@...nel.org, ziy@...dia.com
Subject: Re: [RESEND RFC PATCH 1/2] mm/vmalloc: Do not align size to huge size
On Fri, Dec 12, 2025 at 09:57:00AM +0530, Dev Jain wrote:
> vmalloc() consists of the following:
>
> (1) find empty space in the vmalloc space -> (2) get physical pages from
> the buddy system -> (3) map the pages into the pagetable.
>
> It turns out that the cost of (1) and (3) is pretty insignificant. Hence,
> the cost of vmalloc becomes highly sensitive to physical memory allocation
> time.
>
> Currently, if we decide to use huge mappings, apart from aligning the start
> of the target vm_struct region to the huge-alignment, we also align the
> size. This does not seem to produce any benefit (apart from simplification
> of the code), and there is a clear disadvantage - as mentioned above, the
> main cost of vmalloc comes from its interaction with the buddy system, and
> thus requesting more memory than was requested by the caller is suboptimal
> and unnecessary.
>
> This change is also motivated by the next patch ("arm64/mm: Enable
> vmalloc-huge by default"). Suppose that some user of vmalloc maps 17 pages,
> uses that mapping for an extremely short time, and vfree's it. That patch,
> without this patch, on arm64 will ultimately map 16 * 2 = 32 pages in a
> contiguous way. Since the mapping is used for a very short time, it is
> likely that the extra cost of mapping 15 pages defeats any benefit from
> reduced TLB pressure, and regresses that code path.
>
> Signed-off-by: Dev Jain <dev.jain@....com>
> ---
> mm/vmalloc.c | 38 ++++++++++++++++++++++++++++++--------
> 1 file changed, 30 insertions(+), 8 deletions(-)
>
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index ecbac900c35f..389225a6f7ef 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -654,7 +654,7 @@ static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
> int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
> pgprot_t prot, struct page **pages, unsigned int page_shift)
> {
> - unsigned int i, nr = (end - addr) >> PAGE_SHIFT;
> + unsigned int i, step, nr = (end - addr) >> PAGE_SHIFT;
>
> WARN_ON(page_shift < PAGE_SHIFT);
>
> @@ -662,7 +662,8 @@ int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
> page_shift == PAGE_SHIFT)
> return vmap_small_pages_range_noflush(addr, end, prot, pages);
>
> - for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
> + step = 1U << (page_shift - PAGE_SHIFT);
> + for (i = 0; i < ALIGN_DOWN(nr, step); i += step) {
> int err;
>
> err = vmap_range_noflush(addr, addr + (1UL << page_shift),
> @@ -673,8 +674,9 @@ int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
>
> addr += 1UL << page_shift;
> }
> -
> - return 0;
> + if (IS_ALIGNED(nr, step))
> + return 0;
> + return vmap_small_pages_range_noflush(addr, end, prot, pages + i);
> }
>
Can we improve the readability? Something like below:
<snip>
index 25a4178188ee..14ca019b57af 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -655,6 +655,8 @@ int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
pgprot_t prot, struct page **pages, unsigned int page_shift)
{
unsigned int i, step, nr = (end - addr) >> PAGE_SHIFT;
+ unsigned int nr_aligned;
+ unsigned long chunk_size;
WARN_ON(page_shift < PAGE_SHIFT);
@@ -662,20 +664,24 @@ int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
page_shift == PAGE_SHIFT)
return vmap_small_pages_range_noflush(addr, end, prot, pages);
- step = 1U << (page_shift - PAGE_SHIFT);
- for (i = 0; i < ALIGN_DOWN(nr, step); i += step) {
- int err;
+ step = 1U << (page_shift - PAGE_SHIFT); /* small pages per huge chunk. */
+ nr_aligned = ALIGN_DOWN(nr, step);
+ chunk_size = 1UL << page_shift;
- err = vmap_range_noflush(addr, addr + (1UL << page_shift),
+ for (i = 0; i < nr_aligned; i += step) {
+ int err = vmap_range_noflush(addr, addr + chunk_size,
page_to_phys(pages[i]), prot,
page_shift);
if (err)
return err;
- addr += 1UL << page_shift;
+ addr += chunk_size;
}
- if (IS_ALIGNED(nr, step))
+
+ if (i == nr)
return 0;
+
+ /* Map the tail using small pages. */
return vmap_small_pages_range_noflush(addr, end, prot, pages + i);
}
<snip>
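
Just to spell out the tail handling with concrete (hypothetical) numbers,
say page_shift == PAGE_SHIFT + 4, i.e. step = 16, and nr = 17:

<snip>
	step       = 16;
	nr_aligned = ALIGN_DOWN(17, 16);	/* = 16, i.e. one whole chunk */
	chunk_size = 1UL << page_shift;

	/* The loop maps pages[0..15] as one chunk and exits with i == 16. */
	/* i != nr, so the remaining single page is mapped via: */
	vmap_small_pages_range_noflush(addr, end, prot, pages + 16);
<snip>
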
> int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
> @@ -3197,7 +3199,7 @@ struct vm_struct *__get_vm_area_node(unsigned long size,
> unsigned long requested_size = size;
>
> BUG_ON(in_interrupt());
> - size = ALIGN(size, 1ul << shift);
> + size = PAGE_ALIGN(size);
> if (unlikely(!size))
> return NULL;
>
> @@ -3353,7 +3355,7 @@ static void vm_reset_perms(struct vm_struct *area)
> * Find the start and end range of the direct mappings to make sure that
> * the vm_unmap_aliases() flush includes the direct map.
> */
> - for (i = 0; i < area->nr_pages; i += 1U << page_order) {
> + for (i = 0; i < ALIGN_DOWN(area->nr_pages, 1U << page_order); i += (1U << page_order)) {
>
nr_blocks? i.e. compute the ALIGN_DOWN() once into a named local instead
of keeping it in the loop condition - see the sketch below.
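
Something like this (just illustrative, pick a better name if you like):

<snip>
	unsigned int nr_blocks = ALIGN_DOWN(area->nr_pages, 1U << page_order);

	for (i = 0; i < nr_blocks; i += 1U << page_order) {
		...
	}
<snip>
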
> unsigned long addr = (unsigned long)page_address(area->pages[i]);
>
> if (addr) {
> @@ -3365,6 +3367,18 @@ static void vm_reset_perms(struct vm_struct *area)
> flush_dmap = 1;
> }
> }
> + for (; i < area->nr_pages; ++i) {
> + unsigned long addr = (unsigned long)page_address(area->pages[i]);
> +
> + if (addr) {
> + unsigned long page_size;
> +
> + page_size = PAGE_SIZE;
> + start = min(addr, start);
> + end = max(addr + page_size, end);
> + flush_dmap = 1;
> + }
> + }
>
> /*
> * Set direct map to something invalid so that it won't be cached if
> @@ -3673,6 +3687,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
> * more permissive.
> */
> if (!order) {
> +single_page:
> while (nr_allocated < nr_pages) {
> unsigned int nr, nr_pages_request;
>
> @@ -3704,13 +3719,18 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
> * If zero or pages were obtained partly,
> * fallback to a single page allocator.
> */
> - if (nr != nr_pages_request)
> + if (nr != nr_pages_request) {
> + order = 0;
> break;
> + }
> }
> }
>
> /* High-order pages or fallback path if "bulk" fails. */
> while (nr_allocated < nr_pages) {
> + if (nr_pages - nr_allocated < (1UL << order)) {
> + goto single_page;
> + }
> if (!(gfp & __GFP_NOFAIL) && fatal_signal_pending(current))
> break;
>
Yes, this part requires more attention. That "goto single_page" should be
eliminated, IMO. We should not jump between blocks; logically the
single_page label belongs to the order-0 allocation path.
It probably requires some more refactoring to simplify it.
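
One rough direction (untested, just a sketch) that keeps the tail handling
inside the existing fallback loop instead of jumping back to the bulk path:

<snip>
	/* High-order pages or fallback path if "bulk" fails. */
	while (nr_allocated < nr_pages) {
		/*
		 * Not enough pages left for a whole high-order block,
		 * finish the tail with single pages.
		 */
		if (order && nr_pages - nr_allocated < (1UL << order))
			order = 0;

		if (!(gfp & __GFP_NOFAIL) && fatal_signal_pending(current))
			break;
		...
	}
<snip>
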
>
> @@ -5179,7 +5199,9 @@ static void show_numa_info(struct seq_file *m, struct vm_struct *v,
>
> memset(counters, 0, nr_node_ids * sizeof(unsigned int));
>
> - for (nr = 0; nr < v->nr_pages; nr += step)
> + for (nr = 0; nr < ALIGN_DOWN(v->nr_pages, step); nr += step)
> + counters[page_to_nid(v->pages[nr])] += step;
> + for (; nr < v->nr_pages; ++nr)
> counters[page_to_nid(v->pages[nr])] += step;
>
Can we fit this into one loop? Also, the tail loop keeps adding a full
"step" per remaining page - should that not be 1?
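
For example (untested; it assumes crediting the whole tail group to the
node of its first page is good enough, same as for full groups):

<snip>
	for (nr = 0; nr < v->nr_pages; nr += step)
		counters[page_to_nid(v->pages[nr])] +=
				min(step, v->nr_pages - nr);
<snip>
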
--
Uladzislau Rezki