Message-ID: <20251224111351.41042-12-shivankg@amd.com>
Date: Wed, 24 Dec 2025 11:13:57 +0000
From: Shivank Garg <shivankg@....com>
To: Andrew Morton <akpm@...ux-foundation.org>, David Hildenbrand
<david@...nel.org>, Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
CC: Zi Yan <ziy@...dia.com>, Baolin Wang <baolin.wang@...ux.alibaba.com>,
"Liam R . Howlett" <Liam.Howlett@...cle.com>, Nico Pache <npache@...hat.com>,
Ryan Roberts <ryan.roberts@....com>, Dev Jain <dev.jain@....com>, Barry Song
<baohua@...nel.org>, Lance Yang <lance.yang@...ux.dev>, <linux-mm@...ck.org>,
<linux-kernel@...r.kernel.org>, <shivankg@....com>
Subject: [PATCH V2 4/5] mm/khugepaged: use enum scan_result for result variables and return types
Convert result variables and return types from int to enum scan_result
throughout the khugepaged code. This improves type safety and code
clarity by making the intended set of return values explicit.
No functional change.
Signed-off-by: Shivank Garg <shivankg@....com>
---
mm/khugepaged.c | 111 +++++++++++++++++++++++++-----------------------
1 file changed, 57 insertions(+), 54 deletions(-)
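
Illustrative note (not part of the patch): a minimal userspace C sketch
of the int-to-enum return type pattern this change applies. All names
below are hypothetical stand-ins for the kernel's enum scan_result and
its helpers; the point is only that an enum return type documents the
function's contract where a bare int does not.

	#include <stdio.h>

	/* Hypothetical stand-in for the kernel's enum scan_result. */
	enum scan_result_demo {
		DEMO_SCAN_FAIL,
		DEMO_SCAN_SUCCEED,
	};

	/* Before: an int return type says nothing about the value's meaning. */
	static int check_old(int ok)
	{
		return ok ? DEMO_SCAN_SUCCEED : DEMO_SCAN_FAIL;
	}

	/*
	 * After: the enum return type makes the set of possible results
	 * explicit at the declaration, and lets the compiler or static
	 * analyzers warn about, e.g., unhandled enumerators in a switch.
	 */
	static enum scan_result_demo check_new(int ok)
	{
		return ok ? DEMO_SCAN_SUCCEED : DEMO_SCAN_FAIL;
	}

	int main(void)
	{
		printf("old: %d, new: %d\n", check_old(1), check_new(1));
		return 0;
	}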
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 04ff0730c9a1..6892b23d6fc4 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -537,17 +537,18 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte,
}
}
-static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
- unsigned long start_addr,
- pte_t *pte,
- struct collapse_control *cc,
- struct list_head *compound_pagelist)
+static enum scan_result __collapse_huge_page_isolate(struct vm_area_struct *vma,
+ unsigned long start_addr,
+ pte_t *pte,
+ struct collapse_control *cc,
+ struct list_head *compound_pagelist)
{
struct page *page = NULL;
struct folio *folio = NULL;
unsigned long addr = start_addr;
pte_t *_pte;
- int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
+ int none_or_zero = 0, shared = 0, referenced = 0;
+ enum scan_result result = SCAN_FAIL;
for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
_pte++, addr += PAGE_SIZE) {
@@ -780,13 +781,13 @@ static void __collapse_huge_page_copy_failed(pte_t *pte,
* @ptl: lock on raw pages' PTEs
* @compound_pagelist: list that stores compound pages
*/
-static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
+static enum scan_result __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
pmd_t *pmd, pmd_t orig_pmd, struct vm_area_struct *vma,
unsigned long address, spinlock_t *ptl,
struct list_head *compound_pagelist)
{
unsigned int i;
- int result = SCAN_SUCCEED;
+ enum scan_result result = SCAN_SUCCEED;
/*
* Copying pages' contents is subject to memory poison at any iteration.
@@ -898,10 +899,9 @@ static int hpage_collapse_find_target_node(struct collapse_control *cc)
* Returns enum scan_result value.
*/
-static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
- bool expect_anon,
- struct vm_area_struct **vmap,
- struct collapse_control *cc)
+static enum scan_result hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
+ bool expect_anon, struct vm_area_struct **vmap,
+ struct collapse_control *cc)
{
struct vm_area_struct *vma;
enum tva_type type = cc->is_khugepaged ? TVA_KHUGEPAGED :
@@ -930,7 +930,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
return SCAN_SUCCEED;
}
-static inline int check_pmd_state(pmd_t *pmd)
+static inline enum scan_result check_pmd_state(pmd_t *pmd)
{
pmd_t pmde = pmdp_get_lockless(pmd);
@@ -953,9 +953,9 @@ static inline int check_pmd_state(pmd_t *pmd)
return SCAN_SUCCEED;
}
-static int find_pmd_or_thp_or_none(struct mm_struct *mm,
- unsigned long address,
- pmd_t **pmd)
+static enum scan_result find_pmd_or_thp_or_none(struct mm_struct *mm,
+ unsigned long address,
+ pmd_t **pmd)
{
*pmd = mm_find_pmd(mm, address);
if (!*pmd)
@@ -964,12 +964,12 @@ static int find_pmd_or_thp_or_none(struct mm_struct *mm,
return check_pmd_state(*pmd);
}
-static int check_pmd_still_valid(struct mm_struct *mm,
- unsigned long address,
- pmd_t *pmd)
+static enum scan_result check_pmd_still_valid(struct mm_struct *mm,
+ unsigned long address,
+ pmd_t *pmd)
{
pmd_t *new_pmd;
- int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);
+ enum scan_result result = find_pmd_or_thp_or_none(mm, address, &new_pmd);
if (result != SCAN_SUCCEED)
return result;
@@ -985,15 +985,15 @@ static int check_pmd_still_valid(struct mm_struct *mm,
* Called and returns without pte mapped or spinlocks held.
* Returns result: if not SCAN_SUCCEED, mmap_lock has been released.
*/
-static int __collapse_huge_page_swapin(struct mm_struct *mm,
- struct vm_area_struct *vma,
- unsigned long start_addr, pmd_t *pmd,
- int referenced)
+static enum scan_result __collapse_huge_page_swapin(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long start_addr, pmd_t *pmd,
+ int referenced)
{
int swapped_in = 0;
vm_fault_t ret = 0;
unsigned long addr, end = start_addr + (HPAGE_PMD_NR * PAGE_SIZE);
- int result;
+ enum scan_result result;
pte_t *pte = NULL;
spinlock_t *ptl;
@@ -1062,8 +1062,8 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
return result;
}
-static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
- struct collapse_control *cc)
+static enum scan_result alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
+ struct collapse_control *cc)
{
gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
GFP_TRANSHUGE);
@@ -1090,9 +1090,9 @@ static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
return SCAN_SUCCEED;
}
-static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
- int referenced, int unmapped,
- struct collapse_control *cc)
+static enum scan_result collapse_huge_page(struct mm_struct *mm, unsigned long address,
+ int referenced, int unmapped,
+ struct collapse_control *cc)
{
LIST_HEAD(compound_pagelist);
pmd_t *pmd, _pmd;
@@ -1100,7 +1100,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
pgtable_t pgtable;
struct folio *folio;
spinlock_t *pmd_ptl, *pte_ptl;
- int result = SCAN_FAIL;
+ enum scan_result result = SCAN_FAIL;
struct vm_area_struct *vma;
struct mmu_notifier_range range;
@@ -1246,15 +1246,15 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
return result;
}
-static int hpage_collapse_scan_pmd(struct mm_struct *mm,
- struct vm_area_struct *vma,
- unsigned long start_addr, bool *mmap_locked,
- struct collapse_control *cc)
+static enum scan_result hpage_collapse_scan_pmd(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long start_addr, bool *mmap_locked,
+ struct collapse_control *cc)
{
pmd_t *pmd;
pte_t *pte, *_pte;
- int result = SCAN_FAIL, referenced = 0;
- int none_or_zero = 0, shared = 0;
+ int none_or_zero = 0, shared = 0, referenced = 0;
+ enum scan_result result = SCAN_FAIL;
struct page *page = NULL;
struct folio *folio = NULL;
unsigned long addr;
@@ -1441,8 +1441,8 @@ static void collect_mm_slot(struct mm_slot *slot)
}
/* folio must be locked, and mmap_lock must be held */
-static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmdp, struct folio *folio, struct page *page)
+static enum scan_result set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmdp, struct folio *folio, struct page *page)
{
struct mm_struct *mm = vma->vm_mm;
struct vm_fault vmf = {
@@ -1477,10 +1477,11 @@ static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
return SCAN_SUCCEED;
}
-static int try_collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
- bool install_pmd)
+static enum scan_result try_collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
+ bool install_pmd)
{
- int nr_mapped_ptes = 0, result = SCAN_FAIL;
+ enum scan_result result = SCAN_FAIL;
+ int nr_mapped_ptes = 0;
unsigned int nr_batch_ptes;
struct mmu_notifier_range range;
bool notified = false;
@@ -1862,9 +1863,9 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
* + unlock old pages
* + unlock and free huge page;
*/
-static int collapse_file(struct mm_struct *mm, unsigned long addr,
- struct file *file, pgoff_t start,
- struct collapse_control *cc)
+static enum scan_result collapse_file(struct mm_struct *mm, unsigned long addr,
+ struct file *file, pgoff_t start,
+ struct collapse_control *cc)
{
struct address_space *mapping = file->f_mapping;
struct page *dst;
@@ -1872,7 +1873,8 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
pgoff_t index = 0, end = start + HPAGE_PMD_NR;
LIST_HEAD(pagelist);
XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
- int nr_none = 0, result = SCAN_SUCCEED;
+ enum scan_result result = SCAN_SUCCEED;
+ int nr_none = 0;
bool is_shmem = shmem_file(file);
VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
@@ -2293,16 +2295,16 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
return result;
}
-static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
- struct file *file, pgoff_t start,
- struct collapse_control *cc)
+static enum scan_result hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
+ struct file *file, pgoff_t start,
+ struct collapse_control *cc)
{
struct folio *folio = NULL;
struct address_space *mapping = file->f_mapping;
XA_STATE(xas, &mapping->i_pages, start);
int present, swap;
int node = NUMA_NO_NODE;
- int result = SCAN_SUCCEED;
+ enum scan_result result = SCAN_SUCCEED;
present = 0;
swap = 0;
@@ -2400,7 +2402,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
return result;
}
-static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
+static unsigned int khugepaged_scan_mm_slot(unsigned int pages, enum scan_result *result,
struct collapse_control *cc)
__releases(&khugepaged_mm_lock)
__acquires(&khugepaged_mm_lock)
@@ -2561,7 +2563,7 @@ static void khugepaged_do_scan(struct collapse_control *cc)
unsigned int progress = 0, pass_through_head = 0;
unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
bool wait = true;
- int result = SCAN_SUCCEED;
+ enum scan_result result = SCAN_SUCCEED;
lru_add_drain_all();
@@ -2774,7 +2776,8 @@ int madvise_collapse(struct vm_area_struct *vma, unsigned long start,
struct collapse_control *cc;
struct mm_struct *mm = vma->vm_mm;
unsigned long hstart, hend, addr;
- int thps = 0, last_fail = SCAN_FAIL;
+ enum scan_result last_fail = SCAN_FAIL;
+ int thps = 0;
bool mmap_locked = true;
BUG_ON(vma->vm_start > start);
@@ -2796,7 +2799,7 @@ int madvise_collapse(struct vm_area_struct *vma, unsigned long start,
for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
bool retried = false;
- int result = SCAN_FAIL;
+ enum scan_result result = SCAN_FAIL;
if (!mmap_locked) {
retry:
--
2.43.0