Message-ID: <20260123045733.6954-3-ravis.opensrc@gmail.com>
Date: Thu, 22 Jan 2026 20:57:25 -0800
From: Ravi Jonnalagadda <ravis.opensrc@...il.com>
To: damon@...ts.linux.dev,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org,
linux-doc@...r.kernel.org
Cc: sj@...nel.org,
akpm@...ux-foundation.org,
corbet@....net,
bijan311@...il.com,
ajayjoshi@...ron.com,
Ravi Jonnalagadda <ravis.opensrc@...il.com>
Subject: [RFC PATCH 2/5] mm/damon: add get_goal_metric() op and PA provider

Introduce an optional damon_operations callback get_goal_metric()
that lets ops providers compute goal metrics requiring address-space
knowledge.

Provide a PA implementation that handles DAMOS_QUOTA_NODE_SYS_BP by
iterating the monitored PFN regions and attributing bytes to the
goal's nid.

Core remains generic and asks ops only when needed.

Signed-off-by: Ravi Jonnalagadda <ravis.opensrc@...il.com>
---
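Note for reviewers: the core-side wiring that consults the new callback is
not part of this patch.  As an illustration of the intended usage, the core
could dispatch roughly as in the sketch below, against the structures in
include/linux/damon.h.  The helper name damos_get_ops_goal_metric() is made
up for this sketch, and the fallback to goal->current_value only stands in
for whatever value the core already maintains.

/*
 * Sketch only, not part of this patch: ask the ops provider for a quota
 * goal metric when it implements the optional callback.
 */
static unsigned long damos_get_ops_goal_metric(struct damon_ctx *ctx,
		struct damos *scheme, const struct damos_quota_goal *goal)
{
	/* Use the ops-provided metric only when the callback is set. */
	if (ctx->ops.get_goal_metric)
		return ctx->ops.get_goal_metric(ctx, scheme, goal);

	/* Fall back to the value the core tracks itself. */
	return goal->current_value;
}

As a worked example for DAMOS_QUOTA_NODE_SYS_BP: if the monitored regions
hold 4 GiB on the goal's node and the system has 64 GiB of RAM in total,
the PA provider returns 4 / 64 * 10000 = 625 bp.
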
include/linux/damon.h | 3 +++
mm/damon/paddr.c | 58 +++++++++++++++++++++++++++++++++++++++++++
2 files changed, 61 insertions(+)

diff --git a/include/linux/damon.h b/include/linux/damon.h
index ec5ed1a217fc..67233898c27c 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -649,6 +649,9 @@ struct damon_operations {
bool (*target_valid)(struct damon_target *t);
void (*cleanup_target)(struct damon_target *t);
void (*cleanup)(struct damon_ctx *context);
+ unsigned long (*get_goal_metric)(struct damon_ctx *ctx,
+ struct damos *scheme,
+ const struct damos_quota_goal *goal);
};
/*
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 07a8aead439e..30e4e5663dcb 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -14,6 +14,8 @@
#include <linux/swap.h>
#include <linux/memory-tiers.h>
#include <linux/mm_inline.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
#include "../internal.h"
#include "ops-common.h"
@@ -148,6 +150,48 @@ static bool damon_pa_invalid_damos_folio(struct folio *folio, struct damos *s)
return false;
}
+/* System total RAM in bytes (denominator for bp computation) */
+static unsigned long damon_pa_totalram_bytes(void)
+{
+ return (unsigned long)totalram_pages() << PAGE_SHIFT;
+}
+
+/*
+ * Compute node-scoped system bp for PA contexts:
+ * bp = (bytes attributed to goal->nid across monitored PA regions) /
+ * (system total bytes) * 10000
+ */
+static unsigned long damon_pa_get_node_sys_bp(struct damon_ctx *ctx,
+ struct damos *scheme,
+ const struct damos_quota_goal *goal)
+{
+ int nid = goal ? goal->nid : -1;
+ unsigned long node_bytes = 0;
+ unsigned long total_bytes = damon_pa_totalram_bytes();
+ struct damon_target *t;
+ struct damon_region *r;
+
+ if (nid < 0 || !total_bytes)
+ return 0;
+
+ damon_for_each_target(t, ctx) {
+ damon_for_each_region(r, t) {
+ unsigned long start_pfn = r->ar.start >> PAGE_SHIFT;
+ unsigned long end_pfn = r->ar.end >> PAGE_SHIFT;
+ unsigned long pfn;
+
+ for (pfn = start_pfn; pfn < end_pfn; pfn++) {
+ if (!pfn_valid(pfn))
+ continue;
+ if (page_to_nid(pfn_to_page(pfn)) == nid)
+ node_bytes += PAGE_SIZE;
+ }
+ }
+ }
+
+ return div64_u64((u64)node_bytes * 10000ULL, total_bytes);
+}
+
static unsigned long damon_pa_pageout(struct damon_region *r,
unsigned long addr_unit, struct damos *s,
unsigned long *sz_filter_passed)
@@ -344,6 +388,19 @@ static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
return 0;
}
+/* Generic goal-metric provider for PA */
+static unsigned long damon_pa_get_goal_metric(struct damon_ctx *ctx,
+ struct damos *scheme,
+ const struct damos_quota_goal *goal)
+{
+ switch (goal ? goal->metric : -1) {
+ case DAMOS_QUOTA_NODE_SYS_BP:
+ return damon_pa_get_node_sys_bp(ctx, scheme, goal);
+ default:
+ return 0;
+ }
+}
+
static int damon_pa_scheme_score(struct damon_ctx *context,
struct damon_target *t, struct damon_region *r,
struct damos *scheme)
@@ -378,6 +435,7 @@ static int __init damon_pa_initcall(void)
.cleanup = NULL,
.apply_scheme = damon_pa_apply_scheme,
.get_scheme_score = damon_pa_scheme_score,
+ .get_goal_metric = damon_pa_get_goal_metric,
};
return damon_register_ops(&ops);
--
2.43.0