[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <048076e6-19a1-4440-bd54-12a9b7f1a09c@lucifer.local>
Date: Thu, 8 Jan 2026 17:05:40 +0000
From: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
To: Nico Pache <npache@...hat.com>
Cc: linux-kernel@...r.kernel.org, linux-trace-kernel@...r.kernel.org,
linux-mm@...ck.org, linux-doc@...r.kernel.org, david@...hat.com,
ziy@...dia.com, baolin.wang@...ux.alibaba.com, Liam.Howlett@...cle.com,
ryan.roberts@....com, dev.jain@....com, corbet@....net,
rostedt@...dmis.org, mhiramat@...nel.org,
mathieu.desnoyers@...icios.com, akpm@...ux-foundation.org,
baohua@...nel.org, willy@...radead.org, peterx@...hat.com,
wangkefeng.wang@...wei.com, usamaarif642@...il.com,
sunnanyong@...wei.com, vishal.moola@...il.com,
thomas.hellstrom@...ux.intel.com, yang@...amperecomputing.com,
kas@...nel.org, aarcange@...hat.com, raquini@...hat.com,
anshuman.khandual@....com, catalin.marinas@....com, tiwai@...e.de,
will@...nel.org, dave.hansen@...ux.intel.com, jack@...e.cz,
cl@...two.org, jglisse@...gle.com, surenb@...gle.com,
zokeefe@...gle.com, hannes@...xchg.org, rientjes@...gle.com,
mhocko@...e.com, rdunlap@...radead.org, hughd@...gle.com,
richard.weiyang@...il.com, lance.yang@...ux.dev, vbabka@...e.cz,
rppt@...nel.org, jannh@...gle.com, pfalcato@...e.de
Subject: Re: [PATCH v13 mm-new 03/16] khugepaged: generalize
hugepage_vma_revalidate for mTHP support
OK I'll stop reporting the merge conflicts from that series but yeah here
too I guess it kills a whole bunch :)
On Mon, Dec 01, 2025 at 10:46:14AM -0700, Nico Pache wrote:
> For khugepaged to support different mTHP orders, we must generalize this
> to check if the PMD is not shared by another VMA and that the order is
> enabled.
>
> No functional change in this patch. Also correct a comment about the
> functionality of the revalidation.
>
> Reviewed-by: Wei Yang <richard.weiyang@...il.com>
> Reviewed-by: Lance Yang <lance.yang@...ux.dev>
> Reviewed-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
> Acked-by: David Hildenbrand <david@...hat.com>
> Co-developed-by: Dev Jain <dev.jain@....com>
> Signed-off-by: Dev Jain <dev.jain@....com>
> Signed-off-by: Nico Pache <npache@...hat.com>
> ---
> mm/khugepaged.c | 20 +++++++++++---------
> 1 file changed, 11 insertions(+), 9 deletions(-)
>
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 433ea7283488..69fc6b41f010 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -892,14 +892,13 @@ static int collapse_find_target_node(struct collapse_control *cc)
>
> /*
> * If mmap_lock temporarily dropped, revalidate vma
> - * before taking mmap_lock.
> + * after taking the mmap_lock again.
> * Returns enum scan_result value.
> */
>
> static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
> - bool expect_anon,
> - struct vm_area_struct **vmap,
> - struct collapse_control *cc)
> + bool expect_anon, struct vm_area_struct **vmap,
> + struct collapse_control *cc, unsigned int order)
> {
> struct vm_area_struct *vma;
> enum tva_type type = cc->is_khugepaged ? TVA_KHUGEPAGED :
> @@ -912,15 +911,16 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
> if (!vma)
> return SCAN_VMA_NULL;
>
> + /* Always check the PMD order to ensure its not shared by another VMA */
> if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
> return SCAN_ADDRESS_RANGE;
> - if (!thp_vma_allowable_order(vma, vma->vm_flags, type, PMD_ORDER))
> + if (!thp_vma_allowable_orders(vma, vma->vm_flags, type, BIT(order)))
> return SCAN_VMA_CHECK;
> /*
> * Anon VMA expected, the address may be unmapped then
> * remapped to file after khugepaged reaquired the mmap_lock.
> *
> - * thp_vma_allowable_order may return true for qualified file
> + * thp_vma_allowable_orders may return true for qualified file
> * vmas.
> */
> if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
> @@ -1117,7 +1117,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
> goto out_nolock;
>
> mmap_read_lock(mm);
> - result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
> + result = hugepage_vma_revalidate(mm, address, true, &vma, cc,
> + HPAGE_PMD_ORDER);
> if (result != SCAN_SUCCEED) {
> mmap_read_unlock(mm);
> goto out_nolock;
> @@ -1151,7 +1152,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
> * mmap_lock.
> */
> mmap_write_lock(mm);
> - result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
> + result = hugepage_vma_revalidate(mm, address, true, &vma, cc,
> + HPAGE_PMD_ORDER);
> if (result != SCAN_SUCCEED)
> goto out_up_write;
> /* check if the pmd is still valid */
> @@ -2814,7 +2816,7 @@ int madvise_collapse(struct vm_area_struct *vma, unsigned long start,
> mmap_read_lock(mm);
> mmap_locked = true;
> result = hugepage_vma_revalidate(mm, addr, false, &vma,
> - cc);
> + cc, HPAGE_PMD_ORDER);
> if (result != SCAN_SUCCEED) {
> last_fail = result;
> goto out_nolock;
> --
> 2.51.1
>
Powered by blists - more mailing lists