Message-ID: <20251208062943.68824-7-sj@kernel.org>
Date: Sun, 7 Dec 2025 22:29:10 -0800
From: SeongJae Park <sj@...nel.org>
To:
Cc: SeongJae Park <sj@...nel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
damon@...ts.linux.dev,
linux-kernel@...r.kernel.org,
linux-mm@...ck.org
Subject: [RFC PATCH v3 06/37] mm/damon/paddr: support page fault access check primitive

Extend the DAMON operation set for the physical address space (paddr)
to support the page fault access check primitive.  When the DAMON core
layer asks it to use page fault events as its access check primitive,
paddr installs PROT_NONE protection on the access sampling target
pages, in a way similar to NUMA hinting faults (NUMA_HINT_FAULTS),
using the non-upstreamable hack that was added by the previous commit.

Signed-off-by: SeongJae Park <sj@...nel.org>
---
 mm/damon/paddr.c | 68 ++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 67 insertions(+), 1 deletion(-)

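A rough sketch of what the previous commit's MM_CP_DAMON handling is
assumed to look like, for reviewers reading this patch on its own.  The
expectation is that change_protection() treats MM_CP_DAMON like the
existing MM_CP_PROT_NUMA case, i.e. remaps the covered PTEs with
PAGE_NONE so that the next access faults.  The helper below is
hypothetical and only illustrates that choice; it is not the code of
that commit.

	/*
	 * Illustrative sketch only (not this series' code): MM_CP_DAMON is
	 * assumed to borrow the PAGE_NONE trick that MM_CP_PROT_NUMA
	 * already uses for NUMA hinting faults.
	 */
	static pgprot_t damon_sampling_prot(struct vm_area_struct *vma,
			unsigned long cp_flags)
	{
		if (cp_flags & MM_CP_DAMON)
			return PAGE_NONE;	/* next access will fault */
		return vma->vm_page_prot;	/* keep the normal vma protection */
	}

The faults primitive below then only needs to walk the rmap of the
sampling target folio and call change_protection() with MM_CP_DAMON on
each mapping, which is what damon_pa_change_protection() does.
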
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 07a8aead439e..698ca6b9dde6 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -14,6 +14,7 @@
 #include <linux/swap.h>
 #include <linux/memory-tiers.h>
 #include <linux/mm_inline.h>
+#include <asm/tlb.h>
 
 #include "../internal.h"
 #include "ops-common.h"
@@ -56,7 +57,8 @@ static void __damon_pa_prepare_access_check(struct damon_region *r,
 	damon_pa_mkold(damon_pa_phys_addr(r->sampling_addr, addr_unit));
 }
 
-static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
+/* Use page table accessed bits */
+static void damon_pa_prepare_access_checks_abit(struct damon_ctx *ctx)
 {
 	struct damon_target *t;
 	struct damon_region *r;
@@ -67,6 +69,70 @@ static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
 	}
 }
 
+static bool damon_pa_change_protection_one(struct folio *folio,
+		struct vm_area_struct *vma, unsigned long addr, void *arg)
+{
+	/* todo: batch or remove tlb flushing */
+	struct mmu_gather tlb;
+
+	if (!vma_is_accessible(vma))
+		return true;
+
+	tlb_gather_mmu(&tlb, vma->vm_mm);
+
+	change_protection(&tlb, vma, addr, addr + PAGE_SIZE, MM_CP_DAMON);
+
+	tlb_finish_mmu(&tlb);
+	return true;
+}
+
+static void damon_pa_change_protection(unsigned long paddr)
+{
+	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
+	struct rmap_walk_control rwc = {
+		.rmap_one = damon_pa_change_protection_one,
+		.anon_lock = folio_lock_anon_vma_read,
+	};
+	bool need_lock;
+
+	if (!folio)
+		return;
+	if (!folio_mapped(folio) || !folio_raw_mapping(folio))
+		goto out_put;
+
+	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
+	if (need_lock && !folio_trylock(folio))
+		goto out_put;
+
+	rmap_walk(folio, &rwc);
+
+	if (need_lock)
+		folio_unlock(folio);
+out_put:
+	folio_put(folio);
+}
+
+static void damon_pa_prepare_access_checks_faults(struct damon_ctx *ctx)
+{
+	struct damon_target *t;
+	struct damon_region *r;
+
+	damon_for_each_target(t, ctx) {
+		damon_for_each_region(r, t) {
+			r->sampling_addr = damon_rand(r->ar.start, r->ar.end);
+			damon_pa_change_protection(r->sampling_addr);
+		}
+	}
+}
+
+static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
+{
+	if (ctx->sample_control.primitives_enabled.page_table)
+		damon_pa_prepare_access_checks_abit(ctx);
+	if (ctx->sample_control.primitives_enabled.page_fault)
+		damon_pa_prepare_access_checks_faults(ctx);
+}
+
 static bool damon_pa_young(phys_addr_t paddr, unsigned long *folio_sz)
 {
 	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
--
2.47.3