Message-ID: <Z-VNX0IA8Juh6ALx@pc636>
Date: Thu, 27 Mar 2025 14:06:39 +0100
From: Uladzislau Rezki <urezki@...il.com>
To: Ryan Roberts <ryan.roberts@....com>
Cc: Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>,
Pasha Tatashin <pasha.tatashin@...een.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Uladzislau Rezki <urezki@...il.com>,
Christoph Hellwig <hch@...radead.org>,
David Hildenbrand <david@...hat.com>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
Mark Rutland <mark.rutland@....com>,
Anshuman Khandual <anshuman.khandual@....com>,
Alexandre Ghiti <alexghiti@...osinc.com>,
Kevin Brodsky <kevin.brodsky@....com>,
linux-arm-kernel@...ts.infradead.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v3 10/11] mm/vmalloc: Enter lazy mmu mode while
manipulating vmalloc ptes
On Tue, Mar 04, 2025 at 03:04:40PM +0000, Ryan Roberts wrote:
> Wrap vmalloc's pte table manipulation loops with
> arch_enter_lazy_mmu_mode() / arch_leave_lazy_mmu_mode(). This provides
> the arch code with the opportunity to optimize the pte manipulations.
>
> Note that vmap_pfn() already uses lazy mmu mode since it delegates to
> apply_to_page_range() which enters lazy mmu mode for both user and
> kernel mappings.
>
> These hooks will shortly be used by arm64 to improve vmalloc
> performance.
>
> Signed-off-by: Ryan Roberts <ryan.roberts@....com>
> ---
> mm/vmalloc.c | 14 ++++++++++++++
> 1 file changed, 14 insertions(+)
>
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 6111ce900ec4..b63ca0b7dd40 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -104,6 +104,9 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
> pte = pte_alloc_kernel_track(pmd, addr, mask);
> if (!pte)
> return -ENOMEM;
> +
> + arch_enter_lazy_mmu_mode();
> +
> do {
> if (unlikely(!pte_none(ptep_get(pte)))) {
> if (pfn_valid(pfn)) {
> @@ -127,6 +130,8 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
> set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
> pfn++;
> } while (pte += PFN_DOWN(size), addr += size, addr != end);
> +
> + arch_leave_lazy_mmu_mode();
> *mask |= PGTBL_PTE_MODIFIED;
> return 0;
> }
> @@ -354,6 +359,8 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
> unsigned long size = PAGE_SIZE;
>
> pte = pte_offset_kernel(pmd, addr);
> + arch_enter_lazy_mmu_mode();
> +
> do {
> #ifdef CONFIG_HUGETLB_PAGE
> size = arch_vmap_pte_range_unmap_size(addr, pte);
> @@ -370,6 +377,8 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
> ptent = ptep_get_and_clear(&init_mm, addr, pte);
> WARN_ON(!pte_none(ptent) && !pte_present(ptent));
> } while (pte += (size >> PAGE_SHIFT), addr += size, addr != end);
> +
> + arch_leave_lazy_mmu_mode();
> *mask |= PGTBL_PTE_MODIFIED;
> }
>
> @@ -515,6 +524,9 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
> pte = pte_alloc_kernel_track(pmd, addr, mask);
> if (!pte)
> return -ENOMEM;
> +
> + arch_enter_lazy_mmu_mode();
> +
> do {
> struct page *page = pages[*nr];
>
> @@ -528,6 +540,8 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
> set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
> (*nr)++;
> } while (pte++, addr += PAGE_SIZE, addr != end);
> +
> + arch_leave_lazy_mmu_mode();
> *mask |= PGTBL_PTE_MODIFIED;
> return 0;
> }
> --
> 2.43.0
>
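The hooks are no-ops on architectures that don't implement lazy mmu mode, so
this is purely an opportunity for the arch to batch whatever per-PTE
maintenance it needs across the loop. A minimal userspace sketch of that
batching idea, with every name below (lazy_enter, lazy_leave, set_entry,
flush_entries) hypothetical rather than kernel API:

    #include <stdbool.h>
    #include <stdio.h>

    static bool lazy;       /* inside an enter/leave bracket? */
    static int pending;     /* entries touched since the last flush */

    static void flush_entries(void)
    {
            if (pending) {
                    printf("one flush covering %d entries\n", pending);
                    pending = 0;
            }
    }

    static void lazy_enter(void) { lazy = true; }

    static void lazy_leave(void)
    {
            lazy = false;
            flush_entries();        /* single flush for the whole batch */
    }

    static void set_entry(int idx)
    {
            (void)idx;              /* ... update the entry itself ... */
            pending++;
            if (!lazy)
                    flush_entries();        /* eager path: flush per entry */
    }

    int main(void)
    {
            lazy_enter();
            for (int i = 0; i < 8; i++)
                    set_entry(i);   /* no flush per iteration */
            lazy_leave();           /* maintenance issued once, here */
            return 0;
    }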
Reviewed-by: Uladzislau Rezki (Sony) <urezki@...il.com>
--
Uladzislau Rezki