Message-Id: <20250724084441.380404-4-link@vivo.com>
Date: Thu, 24 Jul 2025 16:44:31 +0800
From: Huan Yang <link@...o.com>
To: Andrew Morton <akpm@...ux-foundation.org>,
David Hildenbrand <david@...hat.com>,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
Rik van Riel <riel@...riel.com>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
Vlastimil Babka <vbabka@...e.cz>,
Harry Yoo <harry.yoo@...cle.com>,
Xu Xin <xu.xin16@....com.cn>,
Chengming Zhou <chengming.zhou@...ux.dev>,
Mike Rapoport <rppt@...nel.org>,
Suren Baghdasaryan <surenb@...gle.com>,
Michal Hocko <mhocko@...e.com>,
Zi Yan <ziy@...dia.com>,
Matthew Brost <matthew.brost@...el.com>,
Joshua Hahn <joshua.hahnjy@...il.com>,
Rakie Kim <rakie.kim@...com>,
Byungchul Park <byungchul@...com>,
Gregory Price <gourry@...rry.net>,
Ying Huang <ying.huang@...ux.alibaba.com>,
Alistair Popple <apopple@...dia.com>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
Huan Yang <link@...o.com>,
Christian Brauner <brauner@...nel.org>,
Usama Arif <usamaarif642@...il.com>,
Yu Zhao <yuzhao@...gle.com>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: [RFC PATCH 3/9] mm/rmap: simplify rmap_walk invoke
Currently the rmap walk is split across two functions, rmap_walk() and
rmap_walk_locked(), whose implementations are almost identical. Remove
rmap_walk_locked() and instead carry the lock state in struct
rmap_walk_control, so each caller sets rwc.locked rather than choosing
between two entry points.

No functional change intended.
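
For example (abridged from the try_to_unmap() hunk below, with the
.done and .anon_lock members omitted), a caller that used to pick an
entry point:

	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)flags,
	};

	if (flags & TTU_RMAP_LOCKED)
		rmap_walk_locked(folio, &rwc);
	else
		rmap_walk(folio, &rwc);

now records the lock state in the walk control instead:

	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)flags,
		.locked = flags & TTU_RMAP_LOCKED,
	};

	rmap_walk(folio, &rwc);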
Signed-off-by: Huan Yang <link@...o.com>
---
include/linux/rmap.h |  3 ++-
mm/migrate.c         |  6 ++----
mm/rmap.c            | 43 ++++++++++++++++---------------------------
3 files changed, 20 insertions(+), 32 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 45904ff413ab..f0d17c971a20 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -996,6 +996,7 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, int flags);
* arg: passed to rmap_one() and invalid_vma()
* try_lock: bail out if the rmap lock is contended
* contended: indicate the rmap traversal bailed out due to lock contention
+ * locked: caller already holds the relevant rmap lock
* rmap_one: executed on each vma where page is mapped
* done: for checking traversing termination condition
* anon_lock: for getting anon_lock by optimized way rather than default
@@ -1005,6 +1006,7 @@ struct rmap_walk_control {
void *arg;
bool try_lock;
bool contended;
+ bool locked;
/*
* Return false if page table scanning in rmap_walk should be stopped.
* Otherwise, return true.
@@ -1018,7 +1020,6 @@ struct rmap_walk_control {
};
void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
-void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);
struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio,
struct rmap_walk_control *rwc);
diff --git a/mm/migrate.c b/mm/migrate.c
index 8cf0f9c9599d..a5a49af7857a 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -355,15 +355,13 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, int flags)
struct rmap_walk_control rwc = {
.rmap_one = remove_migration_pte,
+ .locked = flags & RMP_LOCKED,
.arg = &rmap_walk_arg,
};
VM_BUG_ON_FOLIO((flags & RMP_USE_SHARED_ZEROPAGE) && (src != dst), src);
- if (flags & RMP_LOCKED)
- rmap_walk_locked(dst, &rwc);
- else
- rmap_walk(dst, &rwc);
+ rmap_walk(dst, &rwc);
}
/*
diff --git a/mm/rmap.c b/mm/rmap.c
index a312cae16bb5..bae9f79c7dc9 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -2253,14 +2253,12 @@ void try_to_unmap(struct folio *folio, enum ttu_flags flags)
struct rmap_walk_control rwc = {
.rmap_one = try_to_unmap_one,
.arg = (void *)flags,
+ .locked = flags & TTU_RMAP_LOCKED,
.done = folio_not_mapped,
.anon_lock = folio_lock_anon_vma_read,
};
- if (flags & TTU_RMAP_LOCKED)
- rmap_walk_locked(folio, &rwc);
- else
- rmap_walk(folio, &rwc);
+ rmap_walk(folio, &rwc);
}
/*
@@ -2581,6 +2579,7 @@ void try_to_migrate(struct folio *folio, enum ttu_flags flags)
.rmap_one = try_to_migrate_one,
.arg = (void *)flags,
.done = folio_not_mapped,
+ .locked = flags & TTU_RMAP_LOCKED,
.anon_lock = folio_lock_anon_vma_read,
};
@@ -2607,10 +2606,7 @@ void try_to_migrate(struct folio *folio, enum ttu_flags flags)
if (!folio_test_ksm(folio) && folio_test_anon(folio))
rwc.invalid_vma = invalid_migration_vma;
- if (flags & TTU_RMAP_LOCKED)
- rmap_walk_locked(folio, &rwc);
- else
- rmap_walk(folio, &rwc);
+ rmap_walk(folio, &rwc);
}
#ifdef CONFIG_DEVICE_PRIVATE
@@ -2795,17 +2791,16 @@ static struct anon_vma *rmap_walk_anon_lock(const struct folio *folio,
* rmap method
* @folio: the folio to be handled
* @rwc: control variable according to each walk type
- * @locked: caller holds relevant rmap lock
*
* Find all the mappings of a folio using the mapping pointer and the vma
* chains contained in the anon_vma struct it points to.
*/
-static void rmap_walk_anon(struct folio *folio,
- struct rmap_walk_control *rwc, bool locked)
+static void rmap_walk_anon(struct folio *folio, struct rmap_walk_control *rwc)
{
struct anon_vma *anon_vma;
pgoff_t pgoff_start, pgoff_end;
struct anon_vma_chain *avc;
+ bool locked = rwc->locked;
if (locked) {
anon_vma = folio_anon_vma(folio);
@@ -2908,14 +2903,14 @@ static void __rmap_walk_file(struct folio *folio, struct address_space *mapping,
* rmap_walk_file - do something to file page using the object-based rmap method
* @folio: the folio to be handled
* @rwc: control variable according to each walk type
- * @locked: caller holds relevant rmap lock
*
* Find all the mappings of a folio using the mapping pointer and the vma chains
* contained in the address_space struct it points to.
*/
-static void rmap_walk_file(struct folio *folio,
- struct rmap_walk_control *rwc, bool locked)
+static void rmap_walk_file(struct folio *folio, struct rmap_walk_control *rwc)
{
+ bool locked = rwc->locked;
+
/*
* The folio lock not only makes sure that folio->mapping cannot
* suddenly be NULLified by truncation, it makes sure that the structure
@@ -2933,23 +2928,17 @@ static void rmap_walk_file(struct folio *folio,
void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc)
{
+ /* no KSM support yet for a locked walk */
+ VM_BUG_ON_FOLIO(rwc->locked && folio_test_ksm(folio), folio);
+ /* a caller that already holds the lock must not request try_lock */
+ VM_BUG_ON(rwc->locked && rwc->try_lock);
+
if (unlikely(folio_test_ksm(folio)))
rmap_walk_ksm(folio, rwc);
else if (folio_test_anon(folio))
- rmap_walk_anon(folio, rwc, false);
- else
- rmap_walk_file(folio, rwc, false);
-}
-
-/* Like rmap_walk, but caller holds relevant rmap lock */
-void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
-{
- /* no ksm support for now */
- VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio);
- if (folio_test_anon(folio))
- rmap_walk_anon(folio, rwc, true);
+ rmap_walk_anon(folio, rwc);
else
- rmap_walk_file(folio, rwc, true);
+ rmap_walk_file(folio, rwc);
}
#ifdef CONFIG_HUGETLB_PAGE
--
2.34.1