Message-ID: <20250211003028.213461-6-npache@redhat.com>
Date: Mon, 10 Feb 2025 17:30:24 -0700
From: Nico Pache <npache@...hat.com>
To: linux-kernel@...r.kernel.org,
linux-trace-kernel@...r.kernel.org,
linux-mm@...ck.org
Cc: ryan.roberts@....com,
anshuman.khandual@....com,
catalin.marinas@....com,
cl@...two.org,
vbabka@...e.cz,
mhocko@...e.com,
apopple@...dia.com,
dave.hansen@...ux.intel.com,
will@...nel.org,
baohua@...nel.org,
jack@...e.cz,
srivatsa@...il.mit.edu,
haowenchao22@...il.com,
hughd@...gle.com,
aneesh.kumar@...nel.org,
yang@...amperecomputing.com,
peterx@...hat.com,
ioworker0@...il.com,
wangkefeng.wang@...wei.com,
ziy@...dia.com,
jglisse@...gle.com,
surenb@...gle.com,
vishal.moola@...il.com,
zokeefe@...gle.com,
zhengqi.arch@...edance.com,
jhubbard@...dia.com,
21cnbao@...il.com,
willy@...radead.org,
kirill.shutemov@...ux.intel.com,
david@...hat.com,
aarcange@...hat.com,
raquini@...hat.com,
dev.jain@....com,
sunnanyong@...wei.com,
usamaarif642@...il.com,
audra@...hat.com,
akpm@...ux-foundation.org,
rostedt@...dmis.org,
mathieu.desnoyers@...icios.com,
tiwai@...e.de
Subject: [RFC v2 5/9] khugepaged: generalize __collapse_huge_page_* for mTHP support
Generalize the order of the __collapse_huge_page_* functions so that
future mTHP collapse can reuse them, and scale the allowed number of
none/zero PTEs (khugepaged_max_ptes_none) to the collapse order.

mTHP collapse can suffer from inconsistent behavior and memory-waste
"creep", so disable swap-in and shared-page support for mTHP collapse.

No functional changes in this patch.
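
For example (illustrative numbers, assuming the defaults on x86-64 with
4K pages, where HPAGE_PMD_ORDER is 9 and khugepaged_max_ptes_none
defaults to 511): a PMD collapse keeps the existing limit of 511
none/zero PTEs out of 512, while an order-4 (64K) mTHP collapse would
allow at most 511 >> (9 - 4) = 15 none/zero PTEs out of 16.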
Signed-off-by: Nico Pache <npache@...hat.com>
---
mm/khugepaged.c | 48 ++++++++++++++++++++++++++++--------------------
1 file changed, 28 insertions(+), 20 deletions(-)
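
Not part of the patch (this note sits below the diffstat, so git am
ignores it): a minimal standalone sketch, in userspace C rather than
kernel code, of the per-order loop bound and threshold scaling used
above. HPAGE_PMD_ORDER = 9 and max_ptes_none = 511 are assumed defaults
for x86-64 with 4K pages; at order 9 the shift is zero, so PMD collapse
behavior is unchanged.

/*
 * Sketch of: scaled_none = khugepaged_max_ptes_none >>
 *                          (HPAGE_PMD_ORDER - order);
 */
#include <stdio.h>

#define HPAGE_PMD_ORDER	9	/* 2M PMD on x86-64 with 4K pages */
#define MAX_PTES_NONE	511	/* default khugepaged_max_ptes_none */

int main(void)
{
	for (int order = 2; order <= HPAGE_PMD_ORDER; order++) {
		/* Number of PTEs covered by a collapse of this order */
		int nr_ptes = 1 << order;
		/* Scale the none/zero-PTE limit down to this order */
		int scaled_none = MAX_PTES_NONE >>
				  (HPAGE_PMD_ORDER - order);

		printf("order %d: %3d PTEs, up to %3d may be none/zero\n",
		       order, nr_ptes, scaled_none);
	}
	return 0;
}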
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 0cfcdc11cabd..3776055bd477 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -565,15 +565,17 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
unsigned long address,
pte_t *pte,
struct collapse_control *cc,
- struct list_head *compound_pagelist)
+ struct list_head *compound_pagelist,
+ u8 order)
{
struct page *page = NULL;
struct folio *folio = NULL;
pte_t *_pte;
int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
bool writable = false;
+ int scaled_none = khugepaged_max_ptes_none >> (HPAGE_PMD_ORDER - order);
- for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
+ for (_pte = pte; _pte < pte + (1 << order);
_pte++, address += PAGE_SIZE) {
pte_t pteval = ptep_get(_pte);
if (pte_none(pteval) || (pte_present(pteval) &&
@@ -581,7 +583,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
++none_or_zero;
if (!userfaultfd_armed(vma) &&
(!cc->is_khugepaged ||
- none_or_zero <= khugepaged_max_ptes_none)) {
+ none_or_zero <= scaled_none)) {
continue;
} else {
result = SCAN_EXCEED_NONE_PTE;
@@ -609,8 +611,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
/* See khugepaged_scan_pmd(). */
if (folio_likely_mapped_shared(folio)) {
++shared;
- if (cc->is_khugepaged &&
- shared > khugepaged_max_ptes_shared) {
+ if (order != HPAGE_PMD_ORDER || (cc->is_khugepaged &&
+ shared > khugepaged_max_ptes_shared)) {
result = SCAN_EXCEED_SHARED_PTE;
count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
goto out;
@@ -711,14 +713,15 @@ static void __collapse_huge_page_copy_succeeded(pte_t *pte,
struct vm_area_struct *vma,
unsigned long address,
spinlock_t *ptl,
- struct list_head *compound_pagelist)
+ struct list_head *compound_pagelist,
+ u8 order)
{
struct folio *src, *tmp;
pte_t *_pte;
pte_t pteval;
- for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
- _pte++, address += PAGE_SIZE) {
+ for (_pte = pte; _pte < pte + (1 << order);
+ _pte++, address += PAGE_SIZE) {
pteval = ptep_get(_pte);
if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
@@ -764,7 +767,8 @@ static void __collapse_huge_page_copy_failed(pte_t *pte,
pmd_t *pmd,
pmd_t orig_pmd,
struct vm_area_struct *vma,
- struct list_head *compound_pagelist)
+ struct list_head *compound_pagelist,
+ u8 order)
{
spinlock_t *pmd_ptl;
@@ -781,7 +785,7 @@ static void __collapse_huge_page_copy_failed(pte_t *pte,
* Release both raw and compound pages isolated
* in __collapse_huge_page_isolate.
*/
- release_pte_pages(pte, pte + HPAGE_PMD_NR, compound_pagelist);
+ release_pte_pages(pte, pte + (1 << order), compound_pagelist);
}
/*
@@ -802,7 +806,7 @@ static void __collapse_huge_page_copy_failed(pte_t *pte,
static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
pmd_t *pmd, pmd_t orig_pmd, struct vm_area_struct *vma,
unsigned long address, spinlock_t *ptl,
- struct list_head *compound_pagelist)
+ struct list_head *compound_pagelist, u8 order)
{
unsigned int i;
int result = SCAN_SUCCEED;
@@ -810,7 +814,7 @@ static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
/*
* Copying pages' contents is subject to memory poison at any iteration.
*/
- for (i = 0; i < HPAGE_PMD_NR; i++) {
+ for (i = 0; i < (1 << order); i++) {
pte_t pteval = ptep_get(pte + i);
struct page *page = folio_page(folio, i);
unsigned long src_addr = address + i * PAGE_SIZE;
@@ -829,10 +833,10 @@ static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
if (likely(result == SCAN_SUCCEED))
__collapse_huge_page_copy_succeeded(pte, vma, address, ptl,
- compound_pagelist);
+ compound_pagelist, order);
else
__collapse_huge_page_copy_failed(pte, pmd, orig_pmd, vma,
- compound_pagelist);
+ compound_pagelist, order);
return result;
}
@@ -1000,11 +1004,11 @@ static int check_pmd_still_valid(struct mm_struct *mm,
static int __collapse_huge_page_swapin(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long haddr, pmd_t *pmd,
- int referenced)
+ int referenced, u8 order)
{
int swapped_in = 0;
vm_fault_t ret = 0;
- unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
+ unsigned long address, end = haddr + (PAGE_SIZE << order);
int result;
pte_t *pte = NULL;
spinlock_t *ptl;
@@ -1035,6 +1039,11 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
if (!is_swap_pte(vmf.orig_pte))
continue;
+ if (order != HPAGE_PMD_ORDER) {
+ result = SCAN_EXCEED_SWAP_PTE;
+ goto out;
+ }
+
vmf.pte = pte;
vmf.ptl = ptl;
ret = do_swap_page(&vmf);
@@ -1114,7 +1123,6 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
int result = SCAN_FAIL;
struct vm_area_struct *vma;
struct mmu_notifier_range range;
-
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
/*
@@ -1149,7 +1157,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
* that case. Continuing to collapse causes inconsistency.
*/
result = __collapse_huge_page_swapin(mm, vma, address, pmd,
- referenced);
+ referenced, HPAGE_PMD_ORDER);
if (result != SCAN_SUCCEED)
goto out_nolock;
}
@@ -1196,7 +1204,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
if (pte) {
result = __collapse_huge_page_isolate(vma, address, pte, cc,
- &compound_pagelist);
+ &compound_pagelist, HPAGE_PMD_ORDER);
spin_unlock(pte_ptl);
} else {
result = SCAN_PMD_NULL;
@@ -1226,7 +1234,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
result = __collapse_huge_page_copy(pte, folio, pmd, _pmd,
vma, address, pte_ptl,
- &compound_pagelist);
+ &compound_pagelist, HPAGE_PMD_ORDER);
pte_unmap(pte);
if (unlikely(result != SCAN_SUCCEED))
goto out_up_write;
--
2.48.1