[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <6a2f28b4541bbbc56ea9e07f24b67cef87899a50.1755677674.git.baolin.wang@linux.alibaba.com>
Date: Wed, 20 Aug 2025 17:07:13 +0800
From: Baolin Wang <baolin.wang@...ux.alibaba.com>
To: akpm@...ux-foundation.org,
hughd@...gle.com,
david@...hat.com,
lorenzo.stoakes@...cle.com
Cc: ziy@...dia.com,
Liam.Howlett@...cle.com,
npache@...hat.com,
ryan.roberts@....com,
dev.jain@....com,
baohua@...nel.org,
baolin.wang@...ux.alibaba.com,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: [RFC PATCH 02/11] mm: khugepaged: generalize collapse_file for mTHP support
Generalize collapse_file() to take the folio order as a parameter, in
preparation for future file/shmem mTHP collapse support.
No functional changes in this patch.
Signed-off-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
---
mm/khugepaged.c | 20 +++++++++++---------
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 5d4493b77f3c..e64ed86d28ca 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -2064,21 +2064,23 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
*/
static int collapse_file(struct mm_struct *mm, unsigned long addr,
struct file *file, pgoff_t start,
- struct collapse_control *cc)
+ struct collapse_control *cc,
+ int order)
{
struct address_space *mapping = file->f_mapping;
struct page *dst;
struct folio *folio, *tmp, *new_folio;
- pgoff_t index = 0, end = start + HPAGE_PMD_NR;
+ int nr_pages = 1 << order;
+ pgoff_t index = 0, end = start + nr_pages;
LIST_HEAD(pagelist);
- XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
+ XA_STATE_ORDER(xas, &mapping->i_pages, start, order);
int nr_none = 0, result = SCAN_SUCCEED;
bool is_shmem = shmem_file(file);
VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
- VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
+ VM_BUG_ON(start & (nr_pages - 1));
- result = alloc_charge_folio(&new_folio, mm, cc, HPAGE_PMD_ORDER);
+ result = alloc_charge_folio(&new_folio, mm, cc, order);
if (result != SCAN_SUCCEED)
goto out;
@@ -2426,14 +2428,14 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
* unwritten page.
*/
folio_mark_uptodate(new_folio);
- folio_ref_add(new_folio, HPAGE_PMD_NR - 1);
+ folio_ref_add(new_folio, nr_pages - 1);
if (is_shmem)
folio_mark_dirty(new_folio);
folio_add_lru(new_folio);
/* Join all the small entries into a single multi-index entry. */
- xas_set_order(&xas, start, HPAGE_PMD_ORDER);
+ xas_set_order(&xas, start, order);
xas_store(&xas, new_folio);
WARN_ON_ONCE(xas_error(&xas));
xas_unlock_irq(&xas);
@@ -2496,7 +2498,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
folio_put(new_folio);
out:
VM_BUG_ON(!list_empty(&pagelist));
- trace_mm_khugepaged_collapse_file(mm, new_folio, index, addr, is_shmem, file, HPAGE_PMD_NR, result);
+ trace_mm_khugepaged_collapse_file(mm, new_folio, index, addr, is_shmem, file, nr_pages, result);
return result;
}
@@ -2599,7 +2601,7 @@ static int collapse_scan_file(struct mm_struct *mm, unsigned long addr,
result = SCAN_EXCEED_NONE_PTE;
count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
} else {
- result = collapse_file(mm, addr, file, start, cc);
+ result = collapse_file(mm, addr, file, start, cc, HPAGE_PMD_ORDER);
}
}
--
2.43.5
Powered by blists - more mailing lists