[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-ID: <20260113014130.922385-1-ye.liu@linux.dev>
Date: Tue, 13 Jan 2026 09:41:29 +0800
From: Ye Liu <ye.liu@...ux.dev>
To: Andrew Morton <akpm@...ux-foundation.org>,
David Hildenbrand <david@...nel.org>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
Cc: Ye Liu <liuye@...inos.cn>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
Vlastimil Babka <vbabka@...e.cz>,
Mike Rapoport <rppt@...nel.org>,
Suren Baghdasaryan <surenb@...gle.com>,
Michal Hocko <mhocko@...e.com>,
Zi Yan <ziy@...dia.com>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
Nico Pache <npache@...hat.com>,
Ryan Roberts <ryan.roberts@....com>,
Dev Jain <dev.jain@....com>,
Barry Song <baohua@...nel.org>,
Lance Yang <lance.yang@...ux.dev>,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
linux-fsdevel@...r.kernel.org
Subject: [PATCH] mm: remove redundant page parameter from do_set_pmd()
From: Ye Liu <liuye@...inos.cn>
The page parameter passed to do_set_pmd() was always overwritten with
&folio->page immediately upon function entry, making the parameter
completely redundant. Callers were misled into computing page values
that were then silently discarded.
Changes:
- Convert page from a function parameter to a local variable
- Update function signature in both implementations and stub
- Remove unnecessary folio_file_page() calculation in filemap.c
- Update all three call sites to remove the page argument
This simplifies the API since folio already contains all the page
information needed. The function still uses &folio->page internally
for cache flushing and rmap operations.
Signed-off-by: Ye Liu <liuye@...inos.cn>
---
include/linux/mm.h | 2 +-
mm/filemap.c | 3 +--
mm/khugepaged.c | 6 +++---
mm/memory.c | 7 ++++---
4 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 710d20fc954b..cb1fe75575c3 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1490,7 +1490,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
return pte;
}
-vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page);
+vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio);
void set_pte_range(struct vm_fault *vmf, struct folio *folio,
struct page *page, unsigned int nr, unsigned long addr);
diff --git a/mm/filemap.c b/mm/filemap.c
index ebd75684cb0a..4be5f3f5b8d6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3678,8 +3678,7 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
}
if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) {
- struct page *page = folio_file_page(folio, start);
- vm_fault_t ret = do_set_pmd(vmf, folio, page);
+ vm_fault_t ret = do_set_pmd(vmf, folio);
if (!ret) {
/* The page is mapped successfully, reference consumed. */
folio_unlock(folio);
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 9f790ec34400..2d7b23efa11b 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1442,7 +1442,7 @@ static void collect_mm_slot(struct mm_slot *slot)
/* folio must be locked, and mmap_lock must be held */
static enum scan_result set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmdp, struct folio *folio, struct page *page)
+ pmd_t *pmdp, struct folio *folio)
{
struct mm_struct *mm = vma->vm_mm;
struct vm_fault vmf = {
@@ -1470,7 +1470,7 @@ static enum scan_result set_huge_pmd(struct vm_area_struct *vma, unsigned long a
}
vmf.pmd = pmdp;
- if (do_set_pmd(&vmf, folio, page))
+ if (do_set_pmd(&vmf, folio))
return SCAN_FAIL;
folio_get(folio);
@@ -1678,7 +1678,7 @@ static enum scan_result try_collapse_pte_mapped_thp(struct mm_struct *mm, unsign
maybe_install_pmd:
/* step 5: install pmd entry */
result = install_pmd
- ? set_huge_pmd(vma, haddr, pmd, folio, &folio->page)
+ ? set_huge_pmd(vma, haddr, pmd, folio)
: SCAN_SUCCEED;
goto drop_folio;
abort:
diff --git a/mm/memory.c b/mm/memory.c
index 30a897018482..8b29ecbfe7fa 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5342,8 +5342,9 @@ static void deposit_prealloc_pte(struct vm_fault *vmf)
vmf->prealloc_pte = NULL;
}
-vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page)
+vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio)
{
+ struct page *page;
struct vm_area_struct *vma = vmf->vma;
bool write = vmf->flags & FAULT_FLAG_WRITE;
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
@@ -5418,7 +5419,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *pa
return ret;
}
#else
-vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page)
+vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio)
{
return VM_FAULT_FALLBACK;
}
@@ -5542,7 +5543,7 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
if (pmd_none(*vmf->pmd)) {
if (!needs_fallback && folio_test_pmd_mappable(folio)) {
- ret = do_set_pmd(vmf, folio, page);
+ ret = do_set_pmd(vmf, folio);
if (ret != VM_FAULT_FALLBACK)
return ret;
}
--
2.43.0
Powered by blists - more mailing lists