Message-Id: <20250828171242.59810-4-sj@kernel.org>
Date: Thu, 28 Aug 2025 10:12:34 -0700
From: SeongJae Park <sj@...nel.org>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: SeongJae Park <sj@...nel.org>,
Quanmin Yan <yanquanmin1@...wei.com>,
damon@...ts.linux.dev,
linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
wangkefeng.wang@...wei.com,
zuoze1@...wei.com
Subject: [PATCH v3 03/11] mm/damon/paddr: support addr_unit for DAMOS_PAGEOUT

Add addr_unit support to DAMOS_PAGEOUT action handling in the DAMON
operations set implementation for the physical address space: translate
the core layer's region addresses to physical addresses using
addr_unit, and scale the scheme-applied and filter-passed sizes back to
core address space units.

Signed-off-by: SeongJae Park <sj@...nel.org>
Signed-off-by: Quanmin Yan <yanquanmin1@...wei.com>
Reviewed-by: SeongJae Park <sj@...nel.org>
---
mm/damon/paddr.c | 30 +++++++++++++++++++++++-------
1 file changed, 23 insertions(+), 7 deletions(-)
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index d497373c2bd2..696aeb0f6c8e 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -24,6 +24,19 @@ static phys_addr_t damon_pa_phys_addr(
return (phys_addr_t)addr * addr_unit;
}

+static unsigned long damon_pa_core_addr(
+ phys_addr_t pa, unsigned long addr_unit)
+{
+ /*
+ * Use div_u64() to avoid linking errors related to __udivdi3,
+ * __aeabi_uldivmod, or similar helpers. It should also help the
+ * compiler optimize the division (see the comment of div_u64()).
+ */
+ if (sizeof(pa) == 8 && sizeof(addr_unit) == 4)
+ return div_u64(pa, addr_unit);
+ return pa / addr_unit;
+}
+
static void damon_pa_mkold(phys_addr_t paddr)
{
struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
@@ -135,10 +148,11 @@ static bool damon_pa_invalid_damos_folio(struct folio *folio, struct damos *s)
return false;
}

-static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s,
+static unsigned long damon_pa_pageout(struct damon_region *r,
+ unsigned long addr_unit, struct damos *s,
unsigned long *sz_filter_passed)
{
- unsigned long addr, applied;
+ phys_addr_t addr, applied;
LIST_HEAD(folio_list);
bool install_young_filter = true;
struct damos_filter *filter;
@@ -159,8 +173,8 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s,
damos_add_filter(s, filter);
}

- addr = r->ar.start;
- while (addr < r->ar.end) {
+ addr = damon_pa_phys_addr(r->ar.start, addr_unit);
+ while (addr < damon_pa_phys_addr(r->ar.end, addr_unit)) {
folio = damon_get_folio(PHYS_PFN(addr));
if (damon_pa_invalid_damos_folio(folio, s)) {
addr += PAGE_SIZE;
@@ -170,7 +184,7 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s,
if (damos_pa_filter_out(s, folio))
goto put_folio;
else
- *sz_filter_passed += folio_size(folio);
+ *sz_filter_passed += folio_size(folio) / addr_unit;

folio_clear_referenced(folio);
folio_test_clear_young(folio);
@@ -189,7 +203,7 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s,
applied = reclaim_pages(&folio_list);
cond_resched();
s->last_applied = folio;
- return applied * PAGE_SIZE;
+ return damon_pa_core_addr(applied * PAGE_SIZE, addr_unit);
}

static inline unsigned long damon_pa_mark_accessed_or_deactivate(
@@ -302,9 +316,11 @@ static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
struct damon_target *t, struct damon_region *r,
struct damos *scheme, unsigned long *sz_filter_passed)
{
+ unsigned long aunit = ctx->addr_unit;
+
switch (scheme->action) {
case DAMOS_PAGEOUT:
- return damon_pa_pageout(r, scheme, sz_filter_passed);
+ return damon_pa_pageout(r, aunit, scheme, sz_filter_passed);
case DAMOS_LRU_PRIO:
return damon_pa_mark_accessed(r, scheme, sz_filter_passed);
case DAMOS_LRU_DEPRIO:
--
2.39.5
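
Editorial aside, not part of the patch: the div_u64() branch in the new
damon_pa_core_addr() exists because a plain 64-by-32 division can make
the compiler emit calls to libgcc helpers such as __udivdi3 on i386 or
__aeabi_uldivmod on arm, which the kernel does not provide at link
time.  A minimal standalone sketch of the same pattern, with the
example_ name being hypothetical:

	#include <linux/math64.h>	/* div_u64() */
	#include <linux/types.h>	/* phys_addr_t */

	static unsigned long example_core_addr(phys_addr_t pa,
			unsigned long unit)
	{
		/*
		 * When phys_addr_t is 64-bit but unsigned long is 32-bit,
		 * "pa / unit" would pull in a libgcc division helper;
		 * div_u64() performs the 64-by-32 division without it.
		 */
		if (sizeof(pa) == 8 && sizeof(unit) == 4)
			return div_u64(pa, unit);
		return pa / unit;
	}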