Message-ID: <20250108233128.14484-2-npache@redhat.com>
Date: Wed, 8 Jan 2025 16:31:17 -0700
From: Nico Pache <npache@...hat.com>
To: linux-kernel@...r.kernel.org,
linux-mm@...ck.org
Cc: ryan.roberts@....com,
anshuman.khandual@....com,
catalin.marinas@....com,
cl@...two.org,
vbabka@...e.cz,
mhocko@...e.com,
apopple@...dia.com,
dave.hansen@...ux.intel.com,
will@...nel.org,
baohua@...nel.org,
jack@...e.cz,
srivatsa@...il.mit.edu,
haowenchao22@...il.com,
hughd@...gle.com,
aneesh.kumar@...nel.org,
yang@...amperecomputing.com,
peterx@...hat.com,
ioworker0@...il.com,
wangkefeng.wang@...wei.com,
ziy@...dia.com,
jglisse@...gle.com,
surenb@...gle.com,
vishal.moola@...il.com,
zokeefe@...gle.com,
zhengqi.arch@...edance.com,
jhubbard@...dia.com,
21cnbao@...il.com,
willy@...radead.org,
kirill.shutemov@...ux.intel.com,
david@...hat.com,
aarcange@...hat.com,
raquini@...hat.com,
dev.jain@....com,
sunnanyong@...wei.com,
usamaarif642@...il.com,
audra@...hat.com,
akpm@...ux-foundation.org
Subject: [RFC 01/11] introduce khugepaged_collapse_single_pmd to collapse a single pmd
The khugepaged daemon and madvise_collapse currently have two different
implementations that do almost the same thing.

Create khugepaged_collapse_single_pmd to increase code reuse and create
an entry point for future khugepaged changes.

Signed-off-by: Nico Pache <npache@...hat.com>
---
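A caller-side sketch of how I picture this helper being driven, to make
the intended contract concrete. This is illustrative only: the
collapse_pmd_range() wrapper below is hypothetical and not part of this
series, and real callers would keep their existing progress and error
handling.

	/*
	 * Hypothetical wrapper, not in this series: walk a VMA range
	 * one PMD at a time and let the helper do the collapse work.
	 */
	static int collapse_pmd_range(struct mm_struct *mm,
				      struct vm_area_struct *vma,
				      unsigned long start, unsigned long end,
				      struct collapse_control *cc)
	{
		bool mmap_locked = true;	/* caller took mmap_read_lock() */
		unsigned long addr;
		int result = SCAN_FAIL;

		for (addr = ALIGN(start, HPAGE_PMD_SIZE); addr < end;
		     addr += HPAGE_PMD_SIZE) {
			result = khugepaged_collapse_single_pmd(addr, mm, vma,
								&mmap_locked, cc);
			/*
			 * The helper may have dropped mmap_lock; stop here
			 * rather than revalidating the VMA in this sketch.
			 */
			if (!mmap_locked)
				break;
		}
		return result;
	}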
mm/khugepaged.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 48 insertions(+)
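My reading of the locking contract, stated as an assumption rather than
as the final semantics: on return, *mmap_locked reflects whether the
mmap read lock is still held, so cleanup can be uniform regardless of
which path ran:

	bool mmap_locked = false;	/* helper will take the lock itself */
	int result;

	result = khugepaged_collapse_single_pmd(addr, mm, vma,
						&mmap_locked, cc);
	if (mmap_locked)
		mmap_read_unlock(mm);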
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 653dbb1ff05c..4d932839ff1d 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -2348,6 +2348,54 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
}
#endif

+/*
+ * Try to collapse a single PMD starting at a PMD-aligned addr, and
+ * return the result.
+ */
+static int khugepaged_collapse_single_pmd(unsigned long addr, struct mm_struct *mm,
+					  struct vm_area_struct *vma, bool *mmap_locked,
+					  struct collapse_control *cc)
+{
+	int result = SCAN_FAIL;
+	unsigned long tva_flags = cc->is_khugepaged ? TVA_ENFORCE_SYSFS : 0;
+
+	if (!*mmap_locked) {
+		mmap_read_lock(mm);
+		*mmap_locked = true;
+	}
+
+	if (thp_vma_allowable_order(vma, vma->vm_flags,
+				    tva_flags, PMD_ORDER)) {
+		if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
+			struct file *file = get_file(vma->vm_file);
+			pgoff_t pgoff = linear_page_index(vma, addr);
+
+			mmap_read_unlock(mm);
+			*mmap_locked = false;
+			result = hpage_collapse_scan_file(mm, addr, file, pgoff,
+							  cc);
+			fput(file);
+			if (result == SCAN_PTE_MAPPED_HUGEPAGE) {
+				mmap_read_lock(mm);
+				if (hpage_collapse_test_exit_or_disable(mm)) {
+					mmap_read_unlock(mm);
+					goto end;
+				}
+				result = collapse_pte_mapped_thp(mm, addr,
+							!cc->is_khugepaged);
+				mmap_read_unlock(mm);
+			}
+		} else {
+			result = hpage_collapse_scan_pmd(mm, vma, addr,
+							 mmap_locked, cc);
+		}
+		if (result == SCAN_SUCCEED || result == SCAN_PMD_MAPPED)
+			++khugepaged_pages_collapsed;
+	}
+end:
+	return result;
+}
+
static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
struct collapse_control *cc)
__releases(&khugepaged_mm_lock)
--
2.47.1