[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <2a0c7109-a142-4c0f-925c-c821616ffd8e@amd.com>
Date: Mon, 19 Jan 2026 11:20:48 +0530
From: "Garg, Shivank" <shivankg@....com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: David Hildenbrand <david@...nel.org>,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>, Zi Yan <ziy@...dia.com>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
"Liam R . Howlett" <Liam.Howlett@...cle.com>, Nico Pache
<npache@...hat.com>, Ryan Roberts <ryan.roberts@....com>,
Dev Jain <dev.jain@....com>, Barry Song <baohua@...nel.org>,
Lance Yang <lance.yang@...ux.dev>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH V3 0/5] mm/khugepaged: cleanups and scan limit fix
On 1/19/2026 2:04 AM, Andrew Morton wrote:
> On Sun, 18 Jan 2026 19:22:51 +0000 Shivank Garg <shivankg@....com> wrote:
>
>> This series contains several cleanups for mm/khugepaged.c to improve code
>> readability and type safety, and one functional fix to ensure
>> khugepaged_scan_mm_slot() correctly accounts for small VMAs towards
>> scan limit.
>>
>
> That's a lot of changes to a well-reviewed 24 day old patchset.
>
Sincere apologies for the last minute churn.
>>
>> v3:
>> - Fold mm-khugepaged-count-small-vmas-towards-scan-limit-fix: add comment (Lance)
>> - Remove extern and use two tabs indent (David)
>
> Are you sure? The v2->v3 diff is large. A lot of (unchangelogged)
> alterations from `int' to `enum scan_result'.
>
> It all looks pretty simple/straightforward to me but again, can
> reviewers please check this over fairly soonly, thanks.
>
The diff appears large because it somehow did not capture V2 Patch 4/5.
The correct V3 vs V2 diff is:
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index 37b992b22bba..d7a9053ff4fe 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -17,8 +17,8 @@ extern void khugepaged_enter_vma(struct vm_area_struct *vma,
vm_flags_t vm_flags);
extern void khugepaged_min_free_kbytes_update(void);
extern bool current_is_khugepaged(void);
-extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
- bool install_pmd);
+void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
+ bool install_pmd);
static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
@@ -43,7 +43,7 @@ static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
{
}
static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
- unsigned long addr, bool install_pmd)
+ unsigned long addr, bool install_pmd)
{
}
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 9f790ec34400..fba6aea5bea6 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -538,10 +538,8 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte,
}
static enum scan_result __collapse_huge_page_isolate(struct vm_area_struct *vma,
- unsigned long start_addr,
- pte_t *pte,
- struct collapse_control *cc,
- struct list_head *compound_pagelist)
+ unsigned long start_addr, pte_t *pte, struct collapse_control *cc,
+ struct list_head *compound_pagelist)
{
struct page *page = NULL;
struct folio *folio = NULL;
@@ -900,8 +898,7 @@ static int hpage_collapse_find_target_node(struct collapse_control *cc)
*/
static enum scan_result hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
- bool expect_anon, struct vm_area_struct **vmap,
- struct collapse_control *cc)
+ bool expect_anon, struct vm_area_struct **vmap, struct collapse_control *cc)
{
struct vm_area_struct *vma;
enum tva_type type = cc->is_khugepaged ? TVA_KHUGEPAGED :
@@ -954,8 +951,7 @@ static inline enum scan_result check_pmd_state(pmd_t *pmd)
}
static enum scan_result find_pmd_or_thp_or_none(struct mm_struct *mm,
- unsigned long address,
- pmd_t **pmd)
+ unsigned long address, pmd_t **pmd)
{
*pmd = mm_find_pmd(mm, address);
if (!*pmd)
@@ -965,8 +961,7 @@ static enum scan_result find_pmd_or_thp_or_none(struct mm_struct *mm,
}
static enum scan_result check_pmd_still_valid(struct mm_struct *mm,
- unsigned long address,
- pmd_t *pmd)
+ unsigned long address, pmd_t *pmd)
{
pmd_t *new_pmd;
enum scan_result result = find_pmd_or_thp_or_none(mm, address, &new_pmd);
@@ -986,9 +981,8 @@ static enum scan_result check_pmd_still_valid(struct mm_struct *mm,
* Returns result: if not SCAN_SUCCEED, mmap_lock has been released.
*/
static enum scan_result __collapse_huge_page_swapin(struct mm_struct *mm,
- struct vm_area_struct *vma,
- unsigned long start_addr, pmd_t *pmd,
- int referenced)
+ struct vm_area_struct *vma, unsigned long start_addr, pmd_t *pmd,
+ int referenced)
{
int swapped_in = 0;
vm_fault_t ret = 0;
@@ -1063,7 +1057,7 @@ static enum scan_result __collapse_huge_page_swapin(struct mm_struct *mm,
}
static enum scan_result alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
- struct collapse_control *cc)
+ struct collapse_control *cc)
{
gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
GFP_TRANSHUGE);
@@ -1091,8 +1085,7 @@ static enum scan_result alloc_charge_folio(struct folio **foliop, struct mm_stru
}
static enum scan_result collapse_huge_page(struct mm_struct *mm, unsigned long address,
- int referenced, int unmapped,
- struct collapse_control *cc)
+ int referenced, int unmapped, struct collapse_control *cc)
{
LIST_HEAD(compound_pagelist);
pmd_t *pmd, _pmd;
@@ -1247,9 +1240,8 @@ static enum scan_result collapse_huge_page(struct mm_struct *mm, unsigned long a
}
static enum scan_result hpage_collapse_scan_pmd(struct mm_struct *mm,
- struct vm_area_struct *vma,
- unsigned long start_addr, bool *mmap_locked,
- struct collapse_control *cc)
+ struct vm_area_struct *vma, unsigned long start_addr, bool *mmap_locked,
+ struct collapse_control *cc)
{
pmd_t *pmd;
pte_t *pte, *_pte;
@@ -1442,7 +1434,7 @@ static void collect_mm_slot(struct mm_slot *slot)
/* folio must be locked, and mmap_lock must be held */
static enum scan_result set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmdp, struct folio *folio, struct page *page)
+ pmd_t *pmdp, struct folio *folio, struct page *page)
{
struct mm_struct *mm = vma->vm_mm;
struct vm_fault vmf = {
@@ -1478,7 +1470,7 @@ static enum scan_result set_huge_pmd(struct vm_area_struct *vma, unsigned long a
}
static enum scan_result try_collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
- bool install_pmd)
+ bool install_pmd)
{
enum scan_result result = SCAN_FAIL;
int nr_mapped_ptes = 0;
@@ -1713,7 +1705,7 @@ static enum scan_result try_collapse_pte_mapped_thp(struct mm_struct *mm, unsign
* as pmd-mapped. Possibly install a huge PMD mapping the THP.
*/
void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
- bool install_pmd)
+ bool install_pmd)
{
try_collapse_pte_mapped_thp(mm, addr, install_pmd);
}
@@ -1864,8 +1856,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
* + unlock and free huge page;
*/
static enum scan_result collapse_file(struct mm_struct *mm, unsigned long addr,
- struct file *file, pgoff_t start,
- struct collapse_control *cc)
+ struct file *file, pgoff_t start, struct collapse_control *cc)
{
struct address_space *mapping = file->f_mapping;
struct page *dst;
@@ -2296,8 +2287,7 @@ static enum scan_result collapse_file(struct mm_struct *mm, unsigned long addr,
}
static enum scan_result hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
- struct file *file, pgoff_t start,
- struct collapse_control *cc)
+ struct file *file, pgoff_t start, struct collapse_control *cc)
{
struct folio *folio = NULL;
struct address_space *mapping = file->f_mapping;
Powered by blists - more mailing lists