Message-ID: <e4e4aaae-92be-4cd2-9435-dccad99961bf@linux.alibaba.com>
Date: Wed, 23 Apr 2025 15:30:02 +0800
From: Baolin Wang <baolin.wang@...ux.alibaba.com>
To: Nico Pache <npache@...hat.com>, linux-mm@...ck.org,
linux-doc@...r.kernel.org, linux-kernel@...r.kernel.org,
linux-trace-kernel@...r.kernel.org
Cc: akpm@...ux-foundation.org, corbet@....net, rostedt@...dmis.org,
mhiramat@...nel.org, mathieu.desnoyers@...icios.com, david@...hat.com,
baohua@...nel.org, ryan.roberts@....com, willy@...radead.org,
peterx@...hat.com, ziy@...dia.com, wangkefeng.wang@...wei.com,
usamaarif642@...il.com, sunnanyong@...wei.com, vishal.moola@...il.com,
thomas.hellstrom@...ux.intel.com, yang@...amperecomputing.com,
kirill.shutemov@...ux.intel.com, aarcange@...hat.com, raquini@...hat.com,
dev.jain@....com, anshuman.khandual@....com, catalin.marinas@....com,
tiwai@...e.de, will@...nel.org, dave.hansen@...ux.intel.com, jack@...e.cz,
cl@...two.org, jglisse@...gle.com, surenb@...gle.com, zokeefe@...gle.com,
hannes@...xchg.org, rientjes@...gle.com, mhocko@...e.com,
rdunlap@...radead.org
Subject: Re: [PATCH v4 05/12] khugepaged: generalize __collapse_huge_page_*
for mTHP support
On 2025/4/17 08:02, Nico Pache wrote:
> Generalize the order of the __collapse_huge_page_* functions
> to support future mTHP collapse.
>
> mTHP collapse can suffer from inconsistent behavior and memory waste
> "creep". Disable swapin and shared support for mTHP collapse.
>
> No functional changes in this patch.
>
> Co-developed-by: Dev Jain <dev.jain@....com>
> Signed-off-by: Dev Jain <dev.jain@....com>
> Signed-off-by: Nico Pache <npache@...hat.com>
> ---
> mm/khugepaged.c | 46 ++++++++++++++++++++++++++++------------------
> 1 file changed, 28 insertions(+), 18 deletions(-)
>
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 883e9a46359f..5e9272ab82da 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -565,15 +565,17 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
> unsigned long address,
> pte_t *pte,
> struct collapse_control *cc,
> - struct list_head *compound_pagelist)
> + struct list_head *compound_pagelist,
> + u8 order)
> {
> struct page *page = NULL;
> struct folio *folio = NULL;
> pte_t *_pte;
> int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
> bool writable = false;
> + int scaled_none = khugepaged_max_ptes_none >> (HPAGE_PMD_ORDER - order);
>
> - for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
> + for (_pte = pte; _pte < pte + (1 << order);
> _pte++, address += PAGE_SIZE) {
> pte_t pteval = ptep_get(_pte);
> if (pte_none(pteval) || (pte_present(pteval) &&
> @@ -581,7 +583,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
> ++none_or_zero;
> if (!userfaultfd_armed(vma) &&
> (!cc->is_khugepaged ||
> - none_or_zero <= khugepaged_max_ptes_none)) {
> + none_or_zero <= scaled_none)) {
> continue;
> } else {
> result = SCAN_EXCEED_NONE_PTE;
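(An aside for readers following the scaled_none calculation above: with
4K base pages HPAGE_PMD_ORDER is 9, so for an order-4 (64K) mTHP the
threshold becomes khugepaged_max_ptes_none >> 5, i.e. the default 511
scales down to 15 none/zero PTEs.)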
> @@ -609,8 +611,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
> /* See hpage_collapse_scan_pmd(). */
> if (folio_maybe_mapped_shared(folio)) {
> ++shared;
> - if (cc->is_khugepaged &&
> - shared > khugepaged_max_ptes_shared) {
> + if (order != HPAGE_PMD_ORDER || (cc->is_khugepaged &&
> + shared > khugepaged_max_ptes_shared)) {
> result = SCAN_EXCEED_SHARED_PTE;
> count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
> goto out;
> @@ -711,13 +713,14 @@ static void __collapse_huge_page_copy_succeeded(pte_t *pte,
> struct vm_area_struct *vma,
> unsigned long address,
> spinlock_t *ptl,
> - struct list_head *compound_pagelist)
> + struct list_head *compound_pagelist,
> + u8 order)
> {
> struct folio *src, *tmp;
> pte_t *_pte;
> pte_t pteval;
>
> - for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
> + for (_pte = pte; _pte < pte + (1 << order);
> _pte++, address += PAGE_SIZE) {
> pteval = ptep_get(_pte);
> if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
> @@ -764,7 +767,8 @@ static void __collapse_huge_page_copy_failed(pte_t *pte,
> pmd_t *pmd,
> pmd_t orig_pmd,
> struct vm_area_struct *vma,
> - struct list_head *compound_pagelist)
> + struct list_head *compound_pagelist,
> + u8 order)
> {
> spinlock_t *pmd_ptl;
>
> @@ -781,7 +785,7 @@ static void __collapse_huge_page_copy_failed(pte_t *pte,
> * Release both raw and compound pages isolated
> * in __collapse_huge_page_isolate.
> */
> - release_pte_pages(pte, pte + HPAGE_PMD_NR, compound_pagelist);
> + release_pte_pages(pte, pte + (1 << order), compound_pagelist);
> }
>
> /*
> @@ -802,7 +806,7 @@ static void __collapse_huge_page_copy_failed(pte_t *pte,
> static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
> pmd_t *pmd, pmd_t orig_pmd, struct vm_area_struct *vma,
> unsigned long address, spinlock_t *ptl,
> - struct list_head *compound_pagelist)
> + struct list_head *compound_pagelist, u8 order)
> {
> unsigned int i;
> int result = SCAN_SUCCEED;
> @@ -810,7 +814,7 @@ static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
> /*
> * Copying pages' contents is subject to memory poison at any iteration.
> */
> - for (i = 0; i < HPAGE_PMD_NR; i++) {
> + for (i = 0; i < (1 << order); i++) {
> pte_t pteval = ptep_get(pte + i);
> struct page *page = folio_page(folio, i);
> unsigned long src_addr = address + i * PAGE_SIZE;
> @@ -829,10 +833,10 @@ static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
>
> if (likely(result == SCAN_SUCCEED))
> __collapse_huge_page_copy_succeeded(pte, vma, address, ptl,
> - compound_pagelist);
> + compound_pagelist, order);
> else
> __collapse_huge_page_copy_failed(pte, pmd, orig_pmd, vma,
> - compound_pagelist);
> + compound_pagelist, order);
>
> return result;
> }
> @@ -1000,11 +1004,11 @@ static int check_pmd_still_valid(struct mm_struct *mm,
> static int __collapse_huge_page_swapin(struct mm_struct *mm,
> struct vm_area_struct *vma,
> unsigned long haddr, pmd_t *pmd,
> - int referenced)
> + int referenced, u8 order)
> {
> int swapped_in = 0;
> vm_fault_t ret = 0;
> - unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
> + unsigned long address, end = haddr + (PAGE_SIZE << order);
> int result;
> pte_t *pte = NULL;
> spinlock_t *ptl;
> @@ -1035,6 +1039,12 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
> if (!is_swap_pte(vmf.orig_pte))
> continue;
>
> +		/* Don't swap in for mTHP collapse */
> + if (order != HPAGE_PMD_ORDER) {
> + result = SCAN_EXCEED_SWAP_PTE;
> + goto out;
> + }
IMO, this check should move into hpage_collapse_scan_pmd(): if we see
swap PTEs while scanning for an mTHP collapse, we can return
'SCAN_EXCEED_SWAP_PTE' from there and abort the collapse earlier.
The logic is the same as how you handle the shared PTEs for mTHP.
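Something like the below in the is_swap_pte() branch of
hpage_collapse_scan_pmd() (an untested sketch, assuming the scan loop is
also passed the candidate order, the way __collapse_huge_page_isolate()
is in this patch):

	if (is_swap_pte(pteval)) {
		/* mTHP collapse does not swap in; bail out early */
		if (order != HPAGE_PMD_ORDER) {
			result = SCAN_EXCEED_SWAP_PTE;
			count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
			goto out_unmap;
		}
		++unmapped;
		...
	}

That way the mTHP case never gets as far as
__collapse_huge_page_swapin().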
> vmf.pte = pte;
> vmf.ptl = ptl;
> ret = do_swap_page(&vmf);
> @@ -1154,7 +1164,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
> * that case. Continuing to collapse causes inconsistency.
> */
> result = __collapse_huge_page_swapin(mm, vma, address, pmd,
> - referenced);
> + referenced, HPAGE_PMD_ORDER);
> if (result != SCAN_SUCCEED)
> goto out_nolock;
> }
> @@ -1201,7 +1211,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
> pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
> if (pte) {
> result = __collapse_huge_page_isolate(vma, address, pte, cc,
> - &compound_pagelist);
> + &compound_pagelist, HPAGE_PMD_ORDER);
> spin_unlock(pte_ptl);
> } else {
> result = SCAN_PMD_NULL;
> @@ -1231,7 +1241,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>
> result = __collapse_huge_page_copy(pte, folio, pmd, _pmd,
> vma, address, pte_ptl,
> - &compound_pagelist);
> + &compound_pagelist, HPAGE_PMD_ORDER);
> pte_unmap(pte);
> if (unlikely(result != SCAN_SUCCEED))
> goto out_up_write;