Message-Id: <20250727201813.53858-5-sj@kernel.org>
Date: Sun, 27 Jul 2025 13:18:10 -0700
From: SeongJae Park <sj@...nel.org>
To:
Cc: SeongJae Park <sj@...nel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
damon@...ts.linux.dev,
kernel-team@...a.com,
linux-kernel@...r.kernel.org,
linux-mm@...ck.org
Subject: [RFC v2 4/7] mm/damon/core: read received access reports
Reported access information is currently only saved in the core layer's
internal data structure; it is not yet reflected in the final monitoring
results.  Update the core layer's access information (DAMON regions)
using the reported access information.
Signed-off-by: SeongJae Park <sj@...nel.org>
---
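Note (below the fold, not for the changelog): the rule that
kdamond_apply_access_report() applies is that a report is counted for a
region only when the reported [addr, addr + size) range falls entirely
inside the region.  Below is a rough standalone sketch of that containment
check, for illustration only; it is not kernel code, and the toy struct
and function names are made up for the example.

	#include <stdbool.h>
	#include <stdio.h>

	/* simplified stand-ins for struct damon_region and the report */
	struct toy_region { unsigned long start, end; };
	struct toy_report { unsigned long addr, size; };

	/* same containment check as kdamond_apply_access_report() uses */
	static bool report_fits_region(const struct toy_report *rep,
			const struct toy_region *reg)
	{
		if (rep->addr < reg->start)
			return false;
		if (reg->end < rep->addr + rep->size)
			return false;
		return true;
	}

	int main(void)
	{
		struct toy_region reg = { .start = 0x1000, .end = 0x3000 };
		struct toy_report inside = { .addr = 0x1800, .size = 0x100 };
		struct toy_report crossing = { .addr = 0x2f80, .size = 0x100 };

		/* prints "1 0": fully-contained report matches, the one
		 * crossing the region boundary does not */
		printf("%d %d\n", report_fits_region(&inside, &reg),
				report_fits_region(&crossing, &reg));
		return 0;
	}

As written, a report that spans a region boundary fits no region and is
therefore not applied to any of them.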
include/linux/damon.h | 1 +
mm/damon/core.c | 66 +++++++++++++++++++++++++++++++++++++++++++
2 files changed, 67 insertions(+)
diff --git a/include/linux/damon.h b/include/linux/damon.h
index f3e5c585b5f1..8ec49beac573 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -83,6 +83,7 @@ struct damon_region {
unsigned int age;
/* private: Internal value for age calculation. */
unsigned int last_nr_accesses;
+ bool access_reported;
};
/**
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 4e25fe100b56..428ac1b83118 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -141,6 +141,7 @@ struct damon_region *damon_new_region(unsigned long start, unsigned long end)
region->age = 0;
region->last_nr_accesses = 0;
+ region->access_reported = false;
return region;
}
@@ -2560,6 +2561,69 @@ static void kdamond_init_ctx(struct damon_ctx *ctx)
}
}
+static void kdamond_apply_access_report(struct damon_access_report *report,
+ struct damon_target *t, struct damon_ctx *ctx)
+{
+ struct damon_region *r;
+
+ if (ctx->ops.eligible_report && !ctx->ops.eligible_report(report, t))
+ return;
+
+ /* todo: make search faster, e.g., binary search? */
+ damon_for_each_region(r, t) {
+ if (report->addr < r->ar.start)
+ continue;
+ if (r->ar.end < report->addr + report->size)
+ continue;
+ if (!r->access_reported)
+ damon_update_region_access_rate(r, true, &ctx->attrs);
+ r->access_reported = true;
+ }
+}
+
+static unsigned int kdamond_apply_zero_access_report(struct damon_ctx *ctx)
+{
+ struct damon_target *t;
+ struct damon_region *r;
+ unsigned int max_nr_accesses = 0;
+
+ damon_for_each_target(t, ctx) {
+ damon_for_each_region(r, t) {
+ if (r->access_reported)
+ r->access_reported = false;
+ else
+ damon_update_region_access_rate(r, false,
+ &ctx->attrs);
+ max_nr_accesses = max(max_nr_accesses, r->nr_accesses);
+ }
+ }
+ return max_nr_accesses;
+}
+
+static unsigned int kdamond_check_reported_accesses(struct damon_ctx *ctx)
+{
+ int i;
+ struct damon_access_report *report;
+ struct damon_target *t;
+
+ mutex_lock(&damon_access_reports_lock);
+ for (i = 0; i < damon_access_reports_len; i++) {
+ report = &damon_access_reports[i];
+ if (time_before(report->report_jiffies,
+ jiffies -
+ usecs_to_jiffies(
+ ctx->attrs.sample_interval)))
+ continue;
+ if (report->pid && !damon_target_has_pid(ctx))
+ continue;
+ damon_for_each_target(t, ctx)
+ kdamond_apply_access_report(report, t, ctx);
+ }
+ mutex_unlock(&damon_access_reports_lock);
+ /* For nr_accesses_bp, absence of access should also be reported. */
+ return kdamond_apply_zero_access_report(ctx);
+}
+
/*
* The monitoring daemon that runs as a kernel thread
*/
@@ -2607,6 +2671,8 @@ static int kdamond_fn(void *data)
if (ctx->ops.check_accesses)
max_nr_accesses = ctx->ops.check_accesses(ctx);
+ else
+ max_nr_accesses = kdamond_check_reported_accesses(ctx);
if (ctx->passed_sample_intervals >= next_aggregation_sis)
kdamond_merge_regions(ctx,
--
2.39.5