Message-ID: <20250211003028.213461-2-npache@redhat.com>
Date: Mon, 10 Feb 2025 17:30:20 -0700
From: Nico Pache <npache@...hat.com>
To: linux-kernel@...r.kernel.org,
linux-trace-kernel@...r.kernel.org,
linux-mm@...ck.org
Cc: ryan.roberts@....com,
anshuman.khandual@....com,
catalin.marinas@....com,
cl@...two.org,
vbabka@...e.cz,
mhocko@...e.com,
apopple@...dia.com,
dave.hansen@...ux.intel.com,
will@...nel.org,
baohua@...nel.org,
jack@...e.cz,
srivatsa@...il.mit.edu,
haowenchao22@...il.com,
hughd@...gle.com,
aneesh.kumar@...nel.org,
yang@...amperecomputing.com,
peterx@...hat.com,
ioworker0@...il.com,
wangkefeng.wang@...wei.com,
ziy@...dia.com,
jglisse@...gle.com,
surenb@...gle.com,
vishal.moola@...il.com,
zokeefe@...gle.com,
zhengqi.arch@...edance.com,
jhubbard@...dia.com,
21cnbao@...il.com,
willy@...radead.org,
kirill.shutemov@...ux.intel.com,
david@...hat.com,
aarcange@...hat.com,
raquini@...hat.com,
dev.jain@....com,
sunnanyong@...wei.com,
usamaarif642@...il.com,
audra@...hat.com,
akpm@...ux-foundation.org,
rostedt@...dmis.org,
mathieu.desnoyers@...icios.com,
tiwai@...e.de
Subject: [RFC v2 1/9] introduce khugepaged_collapse_single_pmd to unify khugepaged and madvise_collapse

The khugepaged daemon and madvise_collapse have two different
implementations that do almost the same thing.

Introduce khugepaged_collapse_single_pmd() to increase code reuse
and provide an entry point for future khugepaged changes.

Refactor madvise_collapse() and khugepaged_scan_mm_slot() to use
the new khugepaged_collapse_single_pmd() function.

Signed-off-by: Nico Pache <npache@...hat.com>
---
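
Both call sites now reduce to the same pattern. A minimal sketch of the
intended usage, with names taken from the diff below (the real callers
pass khugepaged_scan.address and addr, respectively):

	bool mmap_locked = true;	/* both callers enter with mmap_lock read-held */
	int result;

	result = khugepaged_collapse_single_pmd(addr, mm, vma, &mmap_locked, cc);
	/*
	 * The helper may drop mmap_lock (e.g. on the file/shmem path);
	 * *mmap_locked reports whether the read lock is still held, so
	 * the caller knows whether vma must be revalidated.
	 */
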
 mm/khugepaged.c | 100 +++++++++++++++++++++++++++-----------------------
 1 file changed, 54 insertions(+), 46 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 5f0be134141e..46faee67378b 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -2365,6 +2365,56 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
}
#endif
+/*
+ * Try to collapse a single PMD starting at a PMD-aligned addr. On
+ * return, *mmap_locked reflects whether the mmap read lock is held.
+ */
+static int khugepaged_collapse_single_pmd(unsigned long addr, struct mm_struct *mm,
+ struct vm_area_struct *vma, bool *mmap_locked,
+ struct collapse_control *cc)
+{
+ int result = SCAN_FAIL;
+ unsigned long tva_flags = cc->is_khugepaged ? TVA_ENFORCE_SYSFS : 0;
+
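+	/* Scanning requires mmap_lock; take it if the caller dropped it. */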
+ if (!*mmap_locked) {
+ mmap_read_lock(mm);
+ *mmap_locked = true;
+ }
+
+ if (thp_vma_allowable_order(vma, vma->vm_flags,
+ tva_flags, PMD_ORDER)) {
+ if (IS_ENABLED(CONFIG_SHMEM) && !vma_is_anonymous(vma)) {
+ struct file *file = get_file(vma->vm_file);
+ pgoff_t pgoff = linear_page_index(vma, addr);
+
+ mmap_read_unlock(mm);
+ *mmap_locked = false;
+ result = hpage_collapse_scan_file(mm, addr, file, pgoff,
+ cc);
+ fput(file);
+ if (result == SCAN_PTE_MAPPED_HUGEPAGE) {
+ mmap_read_lock(mm);
+				if (hpage_collapse_test_exit_or_disable(mm)) {
+					mmap_read_unlock(mm);
+					goto end;
+				}
+ result = collapse_pte_mapped_thp(mm, addr,
+ !cc->is_khugepaged);
+ mmap_read_unlock(mm);
+ }
+ } else {
+ result = hpage_collapse_scan_pmd(mm, vma, addr,
+ mmap_locked, cc);
+ }
+		if (cc->is_khugepaged &&
+		    (result == SCAN_SUCCEED || result == SCAN_PMD_MAPPED))
+			++khugepaged_pages_collapsed;
+ }
+end:
+ return result;
+}
+
static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
struct collapse_control *cc)
__releases(&khugepaged_mm_lock)
@@ -2439,33 +2489,9 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
VM_BUG_ON(khugepaged_scan.address < hstart ||
khugepaged_scan.address + HPAGE_PMD_SIZE >
hend);
- if (IS_ENABLED(CONFIG_SHMEM) && !vma_is_anonymous(vma)) {
- struct file *file = get_file(vma->vm_file);
- pgoff_t pgoff = linear_page_index(vma,
- khugepaged_scan.address);
- mmap_read_unlock(mm);
- mmap_locked = false;
- *result = hpage_collapse_scan_file(mm,
- khugepaged_scan.address, file, pgoff, cc);
- fput(file);
- if (*result == SCAN_PTE_MAPPED_HUGEPAGE) {
- mmap_read_lock(mm);
- if (hpage_collapse_test_exit_or_disable(mm))
- goto breakouterloop;
- *result = collapse_pte_mapped_thp(mm,
- khugepaged_scan.address, false);
- if (*result == SCAN_PMD_MAPPED)
- *result = SCAN_SUCCEED;
- mmap_read_unlock(mm);
- }
- } else {
- *result = hpage_collapse_scan_pmd(mm, vma,
- khugepaged_scan.address, &mmap_locked, cc);
- }
-
- if (*result == SCAN_SUCCEED)
- ++khugepaged_pages_collapsed;
+ *result = khugepaged_collapse_single_pmd(khugepaged_scan.address,
+ mm, vma, &mmap_locked, cc);
/* move to next address */
khugepaged_scan.address += HPAGE_PMD_SIZE;
@@ -2785,36 +2811,18 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
mmap_assert_locked(mm);
memset(cc->node_load, 0, sizeof(cc->node_load));
nodes_clear(cc->alloc_nmask);
- if (IS_ENABLED(CONFIG_SHMEM) && !vma_is_anonymous(vma)) {
- struct file *file = get_file(vma->vm_file);
- pgoff_t pgoff = linear_page_index(vma, addr);
- mmap_read_unlock(mm);
- mmap_locked = false;
- result = hpage_collapse_scan_file(mm, addr, file, pgoff,
- cc);
- fput(file);
- } else {
- result = hpage_collapse_scan_pmd(mm, vma, addr,
- &mmap_locked, cc);
- }
+ result = khugepaged_collapse_single_pmd(addr, mm, vma, &mmap_locked, cc);
+
if (!mmap_locked)
*prev = NULL; /* Tell caller we dropped mmap_lock */
-handle_result:
switch (result) {
case SCAN_SUCCEED:
case SCAN_PMD_MAPPED:
++thps;
break;
case SCAN_PTE_MAPPED_HUGEPAGE:
- BUG_ON(mmap_locked);
- BUG_ON(*prev);
- mmap_read_lock(mm);
- result = collapse_pte_mapped_thp(mm, addr, true);
- mmap_read_unlock(mm);
- goto handle_result;
- /* Whitelisted set of results where continuing OK */
case SCAN_PMD_NULL:
case SCAN_PTE_NON_PRESENT:
case SCAN_PTE_UFFD_WP:
--
2.48.1