Message-Id: <20250211111326.14295-5-dev.jain@arm.com>
Date: Tue, 11 Feb 2025 16:43:13 +0530
From: Dev Jain <dev.jain@....com>
To: akpm@...ux-foundation.org,
david@...hat.com,
willy@...radead.org,
kirill.shutemov@...ux.intel.com
Cc: npache@...hat.com,
ryan.roberts@....com,
anshuman.khandual@....com,
catalin.marinas@....com,
cl@...two.org,
vbabka@...e.cz,
mhocko@...e.com,
apopple@...dia.com,
dave.hansen@...ux.intel.com,
will@...nel.org,
baohua@...nel.org,
jack@...e.cz,
srivatsa@...il.mit.edu,
haowenchao22@...il.com,
hughd@...gle.com,
aneesh.kumar@...nel.org,
yang@...amperecomputing.com,
peterx@...hat.com,
ioworker0@...il.com,
wangkefeng.wang@...wei.com,
ziy@...dia.com,
jglisse@...gle.com,
surenb@...gle.com,
vishal.moola@...il.com,
zokeefe@...gle.com,
zhengqi.arch@...edance.com,
jhubbard@...dia.com,
21cnbao@...il.com,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
Dev Jain <dev.jain@....com>
Subject: [PATCH v2 04/17] khugepaged: Generalize __collapse_huge_page_isolate()

Pass the scan order into __collapse_huge_page_isolate(), scale down the
scan range and the sysfs tunables khugepaged_max_ptes_none and
khugepaged_max_ptes_shared according to that order (their handling will
be changed in subsequent patches), and isolate the folios as before.

Signed-off-by: Dev Jain <dev.jain@....com>
---
 mm/khugepaged.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)
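Not part of the patch, but a worked example of the scaling described in
the commit message: assuming 4K base pages (so HPAGE_PMD_ORDER == 9) and
the default tunable values (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1
== 511, khugepaged_max_ptes_shared == HPAGE_PMD_NR / 2 == 256), the shift
by (HPAGE_PMD_ORDER - order) keeps each threshold proportional to the
number of PTEs scanned at that order. A standalone userspace sketch of
the arithmetic:

	/*
	 * Illustrative userspace sketch, not kernel code. Assumes 4K
	 * base pages (HPAGE_PMD_ORDER == 9) and the default values of
	 * the khugepaged sysfs tunables.
	 */
	#include <stdio.h>

	#define HPAGE_PMD_ORDER	9	/* assumed: 4K base pages */
	#define HPAGE_PMD_NR	(1UL << HPAGE_PMD_ORDER)

	int main(void)
	{
		unsigned int khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;	/* default: 511 */
		unsigned int khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;	/* default: 256 */

		for (int order = 2; order <= HPAGE_PMD_ORDER; order++) {
			/* Same scaling as in __collapse_huge_page_isolate() */
			unsigned int max_ptes_none =
				khugepaged_max_ptes_none >> (HPAGE_PMD_ORDER - order);
			unsigned int max_ptes_shared =
				khugepaged_max_ptes_shared >> (HPAGE_PMD_ORDER - order);

			printf("order %d: scan %3lu PTEs, max_ptes_none %3u, max_ptes_shared %3u\n",
			       order, 1UL << order, max_ptes_none, max_ptes_shared);
		}
		return 0;
	}

For instance, at order 4 this scans 16 PTEs with max_ptes_none == 15 and
max_ptes_shared == 8, preserving the same ratios as the PMD-order
defaults (511/512 and 256/512).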
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 221823c0d95f..0ea99df115cb 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -565,15 +565,17 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 					unsigned long address,
 					pte_t *pte,
 					struct collapse_control *cc,
-					struct list_head *compound_pagelist)
+					struct list_head *compound_pagelist,
+					int order)
 {
-	struct page *page = NULL;
 	struct folio *folio = NULL;
 	pte_t *_pte;
 	int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
 	bool writable = false;
+	unsigned int max_ptes_shared = khugepaged_max_ptes_shared >> (HPAGE_PMD_ORDER - order);
+	unsigned int max_ptes_none = khugepaged_max_ptes_none >> (HPAGE_PMD_ORDER - order);
 
-	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
+	for (_pte = pte; _pte < pte + (1UL << order);
 	     _pte++, address += PAGE_SIZE) {
 		pte_t pteval = ptep_get(_pte);
 		if (pte_none(pteval) || (pte_present(pteval) &&
@@ -581,7 +583,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 			++none_or_zero;
 			if (!userfaultfd_armed(vma) &&
 			    (!cc->is_khugepaged ||
-			     none_or_zero <= khugepaged_max_ptes_none)) {
+			     none_or_zero <= max_ptes_none)) {
 				continue;
 			} else {
 				result = SCAN_EXCEED_NONE_PTE;
@@ -597,20 +599,19 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 			result = SCAN_PTE_UFFD_WP;
 			goto out;
 		}
-		page = vm_normal_page(vma, address, pteval);
-		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
+		folio = vm_normal_folio(vma, address, pteval);
+		if (unlikely(!folio) || unlikely(folio_is_zone_device(folio))) {
 			result = SCAN_PAGE_NULL;
 			goto out;
 		}
 
-		folio = page_folio(page);
 		VM_BUG_ON_FOLIO(!folio_test_anon(folio), folio);
 
 		/* See hpage_collapse_scan_pmd(). */
 		if (folio_likely_mapped_shared(folio)) {
 			++shared;
 			if (cc->is_khugepaged &&
-			    shared > khugepaged_max_ptes_shared) {
+			    shared > max_ptes_shared) {
 				result = SCAN_EXCEED_SHARED_PTE;
 				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
 				goto out;
@@ -1201,7 +1202,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
 	if (pte) {
 		result = __collapse_huge_page_isolate(vma, address, pte, cc,
-						      &compound_pagelist);
+						      &compound_pagelist, HPAGE_PMD_ORDER);
 		spin_unlock(pte_ptl);
 	} else {
 		result = SCAN_PMD_NULL;
--
2.30.2