Message-ID: <20250108233128.14484-10-npache@redhat.com>
Date: Wed, 8 Jan 2025 16:31:25 -0700
From: Nico Pache <npache@...hat.com>
To: linux-kernel@...r.kernel.org,
linux-mm@...ck.org
Cc: ryan.roberts@....com,
anshuman.khandual@....com,
catalin.marinas@....com,
cl@...two.org,
vbabka@...e.cz,
mhocko@...e.com,
apopple@...dia.com,
dave.hansen@...ux.intel.com,
will@...nel.org,
baohua@...nel.org,
jack@...e.cz,
srivatsa@...il.mit.edu,
haowenchao22@...il.com,
hughd@...gle.com,
aneesh.kumar@...nel.org,
yang@...amperecomputing.com,
peterx@...hat.com,
ioworker0@...il.com,
wangkefeng.wang@...wei.com,
ziy@...dia.com,
jglisse@...gle.com,
surenb@...gle.com,
vishal.moola@...il.com,
zokeefe@...gle.com,
zhengqi.arch@...edance.com,
jhubbard@...dia.com,
21cnbao@...il.com,
willy@...radead.org,
kirill.shutemov@...ux.intel.com,
david@...hat.com,
aarcange@...hat.com,
raquini@...hat.com,
dev.jain@....com,
sunnanyong@...wei.com,
usamaarif642@...il.com,
audra@...hat.com,
akpm@...ux-foundation.org
Subject: [RFC 09/11] khugepaged: add mTHP support
Introduce the ability for khugepaged to collapse to different mTHP sizes.
While scanning a PMD range for potential hugepage collapse, track pages
in chunks of order MIN_MTHP_ORDER using a bitmap. Each bit represents a
fully utilized region of MIN_MTHP_ORDER ptes.
With this bitmap we can determine which mTHP sizes would be the most
efficient to collapse to if the PMD collapse is not suitable.
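To make the chunk/bitmap idea concrete, here is a minimal userspace sketch
(not kernel code): the PMD order and the MIN_MTHP_ORDER value are assumed
for illustration, and the selection pass only approximates what
khugepaged_scan_bitmap (called below, defined elsewhere in the series)
might do with the recorded bits.

#include <stdbool.h>
#include <stdio.h>

#define PMD_ORDER	9			/* assumed: 512 ptes per PMD (4K pages) */
#define MIN_MTHP_ORDER	2			/* assumed smallest mTHP chunk: 4 ptes */
#define MIN_MTHP_NR	(1 << MIN_MTHP_ORDER)
#define NR_CHUNKS	(1 << (PMD_ORDER - MIN_MTHP_ORDER))

int main(void)
{
	bool pte_present[1 << PMD_ORDER] = { false };
	bool chunk_full[NR_CHUNKS] = { false };
	int i, j, order;

	/* pretend the first 256 ptes of the PMD range are populated */
	for (i = 0; i < 256; i++)
		pte_present[i] = true;

	/* scan pass: set a bit for each fully utilized MIN_MTHP_NR chunk */
	for (i = 0; i < NR_CHUNKS; i++) {
		bool all_valid = true;

		for (j = 0; j < MIN_MTHP_NR; j++)
			all_valid &= pte_present[i * MIN_MTHP_NR + j];
		chunk_full[i] = all_valid;
	}

	/*
	 * hypothetical selection pass: pick the largest order whose first
	 * aligned region is built entirely from full chunks
	 */
	for (order = PMD_ORDER; order >= MIN_MTHP_ORDER; order--) {
		int chunks = 1 << (order - MIN_MTHP_ORDER);
		bool fits = true;

		for (i = 0; i < chunks; i++)
			fits &= chunk_full[i];
		if (fits) {
			printf("largest collapsible order at offset 0: %d\n", order);
			break;
		}
	}
	return 0;
}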
Signed-off-by: Nico Pache <npache@...hat.com>
---
mm/khugepaged.c | 111 +++++++++++++++++++++++++++++++++---------------
1 file changed, 77 insertions(+), 34 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index de1dc6ea3c71..4d3c560f20b4 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1139,13 +1139,14 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
{
LIST_HEAD(compound_pagelist);
pmd_t *pmd, _pmd;
- pte_t *pte;
+ pte_t *pte, mthp_pte;
pgtable_t pgtable;
struct folio *folio;
spinlock_t *pmd_ptl, *pte_ptl;
int result = SCAN_FAIL;
struct vm_area_struct *vma;
struct mmu_notifier_range range;
+ unsigned long _address = address + offset * PAGE_SIZE;
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
/* if collapsing mTHPs we may have already released the read_lock, and
@@ -1162,12 +1163,13 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
mmap_read_unlock(mm);
*mmap_locked = false;
- result = alloc_charge_folio(&folio, mm, cc, HPAGE_PMD_ORDER);
+ result = alloc_charge_folio(&folio, mm, cc, order);
if (result != SCAN_SUCCEED)
goto out_nolock;
mmap_read_lock(mm);
- result = hugepage_vma_revalidate(mm, address, true, &vma, cc, HPAGE_PMD_ORDER);
+ *mmap_locked = true;
+ result = hugepage_vma_revalidate(mm, address, true, &vma, cc, order);
if (result != SCAN_SUCCEED) {
mmap_read_unlock(mm);
goto out_nolock;
@@ -1185,13 +1187,14 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
* released when it fails. So we jump out_nolock directly in
* that case. Continuing to collapse causes inconsistency.
*/
- result = __collapse_huge_page_swapin(mm, vma, address, pmd,
- referenced, HPAGE_PMD_ORDER);
+ result = __collapse_huge_page_swapin(mm, vma, _address, pmd,
+ referenced, order);
if (result != SCAN_SUCCEED)
goto out_nolock;
}
mmap_read_unlock(mm);
+ *mmap_locked = false;
/*
* Prevent all access to pagetables with the exception of
* gup_fast later handled by the ptep_clear_flush and the VM
@@ -1201,7 +1204,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
* mmap_lock.
*/
mmap_write_lock(mm);
- result = hugepage_vma_revalidate(mm, address, true, &vma, cc, HPAGE_PMD_ORDER);
+ result = hugepage_vma_revalidate(mm, address, true, &vma, cc, order);
if (result != SCAN_SUCCEED)
goto out_up_write;
/* check if the pmd is still valid */
@@ -1212,11 +1215,12 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
vma_start_write(vma);
anon_vma_lock_write(vma->anon_vma);
- mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
- address + HPAGE_PMD_SIZE);
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, _address,
+ _address + (PAGE_SIZE << order));
mmu_notifier_invalidate_range_start(&range);
pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
+
/*
* This removes any huge TLB entry from the CPU so we won't allow
* huge and small TLB entries for the same virtual address to
@@ -1230,10 +1234,10 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
mmu_notifier_invalidate_range_end(&range);
tlb_remove_table_sync_one();
- pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
+ pte = pte_offset_map_lock(mm, &_pmd, _address, &pte_ptl);
if (pte) {
- result = __collapse_huge_page_isolate(vma, address, pte, cc,
- &compound_pagelist, HPAGE_PMD_ORDER);
+ result = __collapse_huge_page_isolate(vma, _address, pte, cc,
+ &compound_pagelist, order);
spin_unlock(pte_ptl);
} else {
result = SCAN_PMD_NULL;
@@ -1262,8 +1266,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
anon_vma_unlock_write(vma->anon_vma);
result = __collapse_huge_page_copy(pte, folio, pmd, _pmd,
- vma, address, pte_ptl,
- &compound_pagelist, HPAGE_PMD_ORDER);
+ vma, _address, pte_ptl,
+ &compound_pagelist, order);
pte_unmap(pte);
if (unlikely(result != SCAN_SUCCEED))
goto out_up_write;
@@ -1274,20 +1278,37 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
* write.
*/
__folio_mark_uptodate(folio);
- pgtable = pmd_pgtable(_pmd);
-
- _pmd = mk_huge_pmd(&folio->page, vma->vm_page_prot);
- _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
-
- spin_lock(pmd_ptl);
- BUG_ON(!pmd_none(*pmd));
- folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
- folio_add_lru_vma(folio, vma);
- pgtable_trans_huge_deposit(mm, pmd, pgtable);
- set_pmd_at(mm, address, pmd, _pmd);
- update_mmu_cache_pmd(vma, address, pmd);
- deferred_split_folio(folio, false);
- spin_unlock(pmd_ptl);
+ if (order == HPAGE_PMD_ORDER) {
+ pgtable = pmd_pgtable(_pmd);
+ _pmd = mk_huge_pmd(&folio->page, vma->vm_page_prot);
+ _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
+
+ spin_lock(pmd_ptl);
+ BUG_ON(!pmd_none(*pmd));
+ folio_add_new_anon_rmap(folio, vma, _address, RMAP_EXCLUSIVE);
+ folio_add_lru_vma(folio, vma);
+ pgtable_trans_huge_deposit(mm, pmd, pgtable);
+ set_pmd_at(mm, address, pmd, _pmd);
+ update_mmu_cache_pmd(vma, address, pmd);
+ deferred_split_folio(folio, false);
+ spin_unlock(pmd_ptl);
+ } else { /* mTHP */
+ mthp_pte = mk_pte(&folio->page, vma->vm_page_prot);
+ mthp_pte = maybe_mkwrite(pte_mkdirty(mthp_pte), vma);
+
+ spin_lock(pmd_ptl);
+ folio_ref_add(folio, (1 << order) - 1);
+ folio_add_new_anon_rmap(folio, vma, _address, RMAP_EXCLUSIVE);
+ folio_add_lru_vma(folio, vma);
+ spin_lock(pte_ptl);
+ set_ptes(vma->vm_mm, _address, pte, mthp_pte, (1 << order));
+ update_mmu_cache_range(NULL, vma, _address, pte, (1 << order));
+ spin_unlock(pte_ptl);
+ smp_wmb(); /* make pte visible before pmd */
+ pmd_populate(mm, pmd, pmd_pgtable(_pmd));
+ deferred_split_folio(folio, false);
+ spin_unlock(pmd_ptl);
+ }
folio = NULL;
@@ -1367,21 +1388,26 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
{
pmd_t *pmd;
pte_t *pte, *_pte;
+ int i;
int result = SCAN_FAIL, referenced = 0;
int none_or_zero = 0, shared = 0;
struct page *page = NULL;
struct folio *folio = NULL;
unsigned long _address;
+ unsigned long enabled_orders;
spinlock_t *ptl;
int node = NUMA_NO_NODE, unmapped = 0;
bool writable = false;
-
+ bool all_valid = true;
+ unsigned long tva_flags = cc->is_khugepaged ? TVA_ENFORCE_SYSFS : 0;
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
result = find_pmd_or_thp_or_none(mm, address, &pmd);
if (result != SCAN_SUCCEED)
goto out;
+ bitmap_zero(cc->mthp_bitmap, 1 << (HPAGE_PMD_ORDER - MIN_MTHP_ORDER));
+ bitmap_zero(cc->mthp_bitmap_temp, 1 << (HPAGE_PMD_ORDER - MIN_MTHP_ORDER));
memset(cc->node_load, 0, sizeof(cc->node_load));
nodes_clear(cc->alloc_nmask);
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
@@ -1390,8 +1416,12 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
goto out;
}
- for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
- _pte++, _address += PAGE_SIZE) {
+ for (i = 0; i < HPAGE_PMD_NR; i++) {
+ if (i % MIN_MTHP_NR == 0)
+ all_valid = true;
+
+ _pte = pte + i;
+ _address = address + i * PAGE_SIZE;
pte_t pteval = ptep_get(_pte);
if (is_swap_pte(pteval)) {
++unmapped;
@@ -1414,6 +1444,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
}
}
if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
+ all_valid = false;
++none_or_zero;
if (!userfaultfd_armed(vma) &&
(!cc->is_khugepaged ||
@@ -1514,7 +1545,15 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
address)))
referenced++;
+
+ /*
+ * We are scanning in MIN_MTHP_NR page chunks. If the chunk contains no
+ * empty pages, record it in the bitmap for mTHP collapsing.
+ */
+ if (all_valid && (i + 1) % MIN_MTHP_NR == 0)
+ bitmap_set(cc->mthp_bitmap, i / MIN_MTHP_NR, 1);
}
+
if (!writable) {
result = SCAN_PAGE_RO;
} else if (cc->is_khugepaged &&
@@ -1527,10 +1566,12 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
out_unmap:
pte_unmap_unlock(pte, ptl);
if (result == SCAN_SUCCEED) {
- result = collapse_huge_page(mm, address, referenced,
- unmapped, cc, mmap_locked, HPAGE_PMD_ORDER, 0);
- /* collapse_huge_page will return with the mmap_lock released */
- *mmap_locked = false;
+ enabled_orders = thp_vma_allowable_orders(vma, vma->vm_flags,
+ tva_flags, THP_ORDERS_ALL_ANON);
+ result = khugepaged_scan_bitmap(mm, address, referenced, unmapped, cc,
+ mmap_locked, enabled_orders);
+ if (result > 0)
+ result = SCAN_SUCCEED;
}
out:
trace_mm_khugepaged_scan_pmd(mm, &folio->page, writable, referenced,
@@ -2477,11 +2518,13 @@ static int khugepaged_collapse_single_pmd(unsigned long addr, struct mm_struct *
fput(file);
if (result == SCAN_PTE_MAPPED_HUGEPAGE) {
mmap_read_lock(mm);
+ *mmap_locked = true;
if (khugepaged_test_exit_or_disable(mm))
goto end;
result = collapse_pte_mapped_thp(mm, addr,
!cc->is_khugepaged);
mmap_read_unlock(mm);
+ *mmap_locked = false;
}
} else {
result = khugepaged_scan_pmd(mm, vma, addr,
--
2.47.1