Message-Id: <20240426195247.100306-2-sj@kernel.org>
Date: Fri, 26 Apr 2024 12:52:40 -0700
From: SeongJae Park <sj@...nel.org>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: SeongJae Park <sj@...nel.org>,
	damon@...ts.linux.dev,
	linux-mm@...ck.org,
	linux-kernel@...r.kernel.org,
	Honggyu Kim <honggyu.kim@...com>
Subject: [PATCH 1/7] mm/damon/paddr: implement damon_folio_young()

damon_pa_young() receives a physical address, gets the folio covering
the address, and reports whether the folio has been accessed since the
last check. A following commit will reuse the internal logic for
checking access to a given folio. To avoid code duplication, split the
internal logic out into a new function, damon_folio_young(). Also,
rename the rmap walker function from __damon_pa_young() to
damon_folio_young_one(), following the change of its caller's name and
the naming convention more commonly used by other rmap walkers.

Signed-off-by: SeongJae Park <sj@...nel.org>
Tested-by: Honggyu Kim <honggyu.kim@...com>
---
 mm/damon/paddr.c | 32 +++++++++++++++++++-------------
 1 file changed, 19 insertions(+), 13 deletions(-)

diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 5e6dc312072cd..25c3ba2a9eaf4 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -79,8 +79,8 @@ static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
 	}
 }
 
-static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
-		unsigned long addr, void *arg)
+static bool damon_folio_young_one(struct folio *folio,
+		struct vm_area_struct *vma, unsigned long addr, void *arg)
 {
 	bool *accessed = arg;
 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
@@ -111,38 +111,44 @@ static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
 	return *accessed == false;
 }
 
-static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
+static bool damon_folio_young(struct folio *folio)
 {
-	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
 	bool accessed = false;
 	struct rmap_walk_control rwc = {
 		.arg = &accessed,
-		.rmap_one = __damon_pa_young,
+		.rmap_one = damon_folio_young_one,
 		.anon_lock = folio_lock_anon_vma_read,
 	};
 	bool need_lock;
 
-	if (!folio)
-		return false;
-
 	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
 		if (folio_test_idle(folio))
-			accessed = false;
+			return false;
 		else
-			accessed = true;
-		goto out;
+			return true;
 	}
 
 	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
 	if (need_lock && !folio_trylock(folio))
-		goto out;
+		return false;
 
 	rmap_walk(folio, &rwc);
 
 	if (need_lock)
 		folio_unlock(folio);
 
-out:
+	return accessed;
+}
+
+static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
+{
+	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
+	bool accessed;
+
+	if (!folio)
+		return false;
+
+	accessed = damon_folio_young(folio);
 	*folio_sz = folio_size(folio);
 	folio_put(folio);
 	return accessed;
--
2.39.2
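
[Editor's note: for context, the point of the split is that
damon_folio_young() can now be called on a folio the caller already
holds, with no physical-address lookup or reference handling. A
following commit could reuse it roughly as sketched below; this is a
minimal hypothetical sketch, not part of this patch, and
damos_folio_matches_young() is a made-up name for illustration.]

	/*
	 * Hypothetical caller: check whether a folio was accessed since
	 * the last check, reusing the logic split out by this patch.
	 */
	static bool damos_folio_matches_young(struct folio *folio)
	{
		/*
		 * No damon_get_folio()/folio_put() pair is needed here;
		 * the caller already holds a reference to the folio.
		 */
		return damon_folio_young(folio);
	}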