Message-ID: <20250515032226.128900-4-npache@redhat.com>
Date: Wed, 14 May 2025 21:22:17 -0600
From: Nico Pache <npache@...hat.com>
To: linux-mm@...ck.org,
linux-doc@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-trace-kernel@...r.kernel.org
Cc: david@...hat.com,
ziy@...dia.com,
baolin.wang@...ux.alibaba.com,
lorenzo.stoakes@...cle.com,
Liam.Howlett@...cle.com,
ryan.roberts@....com,
dev.jain@....com,
corbet@....net,
rostedt@...dmis.org,
mhiramat@...nel.org,
mathieu.desnoyers@...icios.com,
akpm@...ux-foundation.org,
baohua@...nel.org,
willy@...radead.org,
peterx@...hat.com,
wangkefeng.wang@...wei.com,
usamaarif642@...il.com,
sunnanyong@...wei.com,
vishal.moola@...il.com,
thomas.hellstrom@...ux.intel.com,
yang@...amperecomputing.com,
kirill.shutemov@...ux.intel.com,
aarcange@...hat.com,
raquini@...hat.com,
anshuman.khandual@....com,
catalin.marinas@....com,
tiwai@...e.de,
will@...nel.org,
dave.hansen@...ux.intel.com,
jack@...e.cz,
cl@...two.org,
jglisse@...gle.com,
surenb@...gle.com,
zokeefe@...gle.com,
hannes@...xchg.org,
rientjes@...gle.com,
mhocko@...e.com,
rdunlap@...radead.org
Subject: [PATCH v7 03/12] khugepaged: generalize hugepage_vma_revalidate for mTHP support

For khugepaged to support different mTHP orders, we must generalize
hugepage_vma_revalidate() to check that the PMD is not shared by another
VMA and that the requested order is enabled.

No functional change in this patch.

Reviewed-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
Co-developed-by: Dev Jain <dev.jain@....com>
Signed-off-by: Dev Jain <dev.jain@....com>
Signed-off-by: Nico Pache <npache@...hat.com>
---
mm/khugepaged.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
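
Not part of the change itself: below is a minimal, hypothetical userspace
sketch of the calling convention this generalization enables. The names
(fake_vma, order_enabled, revalidate_range) are stand-ins invented for the
illustration, not the kernel's thp_vma_suitable_order() /
thp_vma_allowable_order() helpers; it only models the "does the aligned
range fit in the VMA and is this order enabled" checks that the new order
parameter makes per-collapse rather than hardcoded to PMD_ORDER.

/*
 * Illustrative userspace model only -- not kernel code. It mimics the idea
 * of the patch: the caller passes the order it wants to collapse to, so the
 * same revalidation helper can serve PMD-sized and (future) mTHP-sized
 * collapses. All names here are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_ORDER	9	/* 2 MiB with 4 KiB base pages */

struct fake_vma {
	unsigned long vm_start;
	unsigned long vm_end;
};

/* Orders the admin has enabled, loosely modelled on the per-order sysfs knobs. */
static unsigned long enabled_orders = (1UL << PMD_ORDER) | (1UL << 4);

static bool order_enabled(int order)
{
	return enabled_orders & (1UL << order);
}

/*
 * Userspace model of the revalidation after this patch: the order is a
 * parameter, so the "range fits in the VMA" and "order is allowed" checks
 * no longer assume PMD_ORDER.
 */
static bool revalidate_range(const struct fake_vma *vma, unsigned long addr,
			     int order)
{
	unsigned long size = 1UL << (order + PAGE_SHIFT);
	unsigned long start = addr & ~(size - 1);	/* align down to the collapse size */

	if (start < vma->vm_start || start + size > vma->vm_end)
		return false;	/* range crosses the VMA: think SCAN_ADDRESS_RANGE */
	if (!order_enabled(order))
		return false;	/* order not enabled: think SCAN_VMA_CHECK */
	return true;
}

int main(void)
{
	struct fake_vma vma = { .vm_start = 0x200000, .vm_end = 0x400000 };

	printf("PMD-order collapse at 0x200000: %s\n",
	       revalidate_range(&vma, 0x200000, PMD_ORDER) ? "ok" : "rejected");
	printf("order-4 (64 KiB) collapse at 0x3f0000: %s\n",
	       revalidate_range(&vma, 0x3f0000, 4) ? "ok" : "rejected");
	return 0;
}

Built with a plain cc, it reports both collapses as "ok" for the example
VMA; shrinking the VMA or clearing a bit in enabled_orders flips the
corresponding result, which is the per-order decision the kernel change
makes possible.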

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 5457571d505a..0c4d6a02d59c 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -920,7 +920,7 @@ static int khugepaged_find_target_node(struct collapse_control *cc)
 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
				   bool expect_anon,
				   struct vm_area_struct **vmap,
-				   struct collapse_control *cc)
+				   struct collapse_control *cc, int order)
 {
	struct vm_area_struct *vma;
	unsigned long tva_flags = cc->is_khugepaged ? TVA_ENFORCE_SYSFS : 0;
@@ -934,7 +934,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
	if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
		return SCAN_ADDRESS_RANGE;

-	if (!thp_vma_allowable_order(vma, vma->vm_flags, tva_flags, PMD_ORDER))
+	if (!thp_vma_allowable_order(vma, vma->vm_flags, tva_flags, order))
		return SCAN_VMA_CHECK;
	/*
	 * Anon VMA expected, the address may be unmapped then
@@ -1130,7 +1130,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
		goto out_nolock;

	mmap_read_lock(mm);
-	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
+	result = hugepage_vma_revalidate(mm, address, true, &vma, cc, HPAGE_PMD_ORDER);
	if (result != SCAN_SUCCEED) {
		mmap_read_unlock(mm);
		goto out_nolock;
@@ -1164,7 +1164,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
	 * mmap_lock.
	 */
	mmap_write_lock(mm);
-	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
+	result = hugepage_vma_revalidate(mm, address, true, &vma, cc, HPAGE_PMD_ORDER);
	if (result != SCAN_SUCCEED)
		goto out_up_write;
	/* check if the pmd is still valid */
@@ -2782,7 +2782,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
			mmap_read_lock(mm);
			mmap_locked = true;
			result = hugepage_vma_revalidate(mm, addr, false, &vma,
-							 cc);
+							 cc, HPAGE_PMD_ORDER);
			if (result != SCAN_SUCCEED) {
				last_fail = result;
				goto out_nolock;
--
2.49.0