Message-ID: <20250822093420.2103803-5-yanquanmin1@huawei.com>
Date: Fri, 22 Aug 2025 17:34:12 +0800
From: Quanmin Yan <yanquanmin1@...wei.com>
To: <sj@...nel.org>
CC: <akpm@...ux-foundation.org>, <damon@...ts.linux.dev>,
<linux-kernel@...r.kernel.org>, <linux-mm@...ck.org>,
<yanquanmin1@...wei.com>, <wangkefeng.wang@...wei.com>, <zuoze1@...wei.com>
Subject: [PATCH v2 04/11] mm/damon/paddr: support addr_unit for DAMOS_LRU_[DE]PRIO
From: SeongJae Park <sj@...nel.org>
Add support for addr_unit to the DAMOS_LRU_PRIO and DAMOS_LRU_DEPRIO
action handling in the DAMOS operations implementation for the
physical address space.
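For illustration only (not part of the change itself), the scaling this
patch applies can be sketched in plain C. This is a minimal userspace
sketch, assuming damon_pa_phys_addr() (introduced earlier in this
series) simply multiplies a core-layer address by addr_unit; the helper
below is a hypothetical stand-in, not the in-kernel definition:

/*
 * Userspace sketch of the addr_unit scaling: core-layer addresses are
 * scaled up before the folio lookup, and byte sizes (applied size,
 * sz_filter_passed) are scaled back down before being reported.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

/* Hypothetical stand-in for the in-kernel helper. */
static phys_addr_t damon_pa_phys_addr(unsigned long addr,
				      unsigned long addr_unit)
{
	return (phys_addr_t)addr * addr_unit;
}

int main(void)
{
	unsigned long addr_unit = 16;		/* e.g. 16-byte granules */
	unsigned long core_addr = 0x100000UL;	/* core-layer address */
	unsigned long folio_sz = 4096;		/* bytes */

	printf("physical address: %#llx\n",
	       (unsigned long long)damon_pa_phys_addr(core_addr, addr_unit));
	printf("reported size:    %lu\n", folio_sz / addr_unit);
	return 0;
}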
Signed-off-by: SeongJae Park <sj@...nel.org>
Signed-off-by: Quanmin Yan <yanquanmin1@...wei.com>
Reviewed-by: SeongJae Park <sj@...nel.org>
---
mm/damon/paddr.c | 29 +++++++++++++++++------------
1 file changed, 17 insertions(+), 12 deletions(-)
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 826c2064dbfd..ed71dd0bf80e 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -194,14 +194,15 @@ static unsigned long damon_pa_pageout(struct damon_region *r,
}
static inline unsigned long damon_pa_mark_accessed_or_deactivate(
- struct damon_region *r, struct damos *s, bool mark_accessed,
+ struct damon_region *r, unsigned long addr_unit,
+ struct damos *s, bool mark_accessed,
unsigned long *sz_filter_passed)
{
- unsigned long addr, applied = 0;
+ phys_addr_t addr, applied = 0;
struct folio *folio;
- addr = r->ar.start;
- while (addr < r->ar.end) {
+ addr = damon_pa_phys_addr(r->ar.start, addr_unit);
+ while (addr < damon_pa_phys_addr(r->ar.end, addr_unit)) {
folio = damon_get_folio(PHYS_PFN(addr));
if (damon_pa_invalid_damos_folio(folio, s)) {
addr += PAGE_SIZE;
@@ -211,7 +212,7 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
if (damos_pa_filter_out(s, folio))
goto put_folio;
else
- *sz_filter_passed += folio_size(folio);
+ *sz_filter_passed += folio_size(folio) / addr_unit;
if (mark_accessed)
folio_mark_accessed(folio);
@@ -223,20 +224,22 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
folio_put(folio);
}
s->last_applied = folio;
- return applied * PAGE_SIZE;
+ return applied * PAGE_SIZE / addr_unit;
}
static unsigned long damon_pa_mark_accessed(struct damon_region *r,
- struct damos *s, unsigned long *sz_filter_passed)
+ unsigned long addr_unit, struct damos *s,
+ unsigned long *sz_filter_passed)
{
- return damon_pa_mark_accessed_or_deactivate(r, s, true,
+ return damon_pa_mark_accessed_or_deactivate(r, addr_unit, s, true,
sz_filter_passed);
}
static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
- struct damos *s, unsigned long *sz_filter_passed)
+ unsigned long addr_unit, struct damos *s,
+ unsigned long *sz_filter_passed)
{
- return damon_pa_mark_accessed_or_deactivate(r, s, false,
+ return damon_pa_mark_accessed_or_deactivate(r, addr_unit, s, false,
sz_filter_passed);
}
@@ -309,9 +312,11 @@ static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
case DAMOS_PAGEOUT:
return damon_pa_pageout(r, aunit, scheme, sz_filter_passed);
case DAMOS_LRU_PRIO:
- return damon_pa_mark_accessed(r, scheme, sz_filter_passed);
+ return damon_pa_mark_accessed(r, aunit, scheme,
+ sz_filter_passed);
case DAMOS_LRU_DEPRIO:
- return damon_pa_deactivate_pages(r, scheme, sz_filter_passed);
+ return damon_pa_deactivate_pages(r, aunit, scheme,
+ sz_filter_passed);
case DAMOS_MIGRATE_HOT:
case DAMOS_MIGRATE_COLD:
return damon_pa_migrate(r, scheme, sz_filter_passed);
--
2.43.0