[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <20251123184329.85287-13-sj@kernel.org>
Date: Sun, 23 Nov 2025 10:43:26 -0800
From: SeongJae Park <sj@...nel.org>
To:
Cc: SeongJae Park <sj@...nel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Masami Hiramatsu <mhiramat@...nel.org>,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
Steven Rostedt <rostedt@...dmis.org>,
damon@...ts.linux.dev,
linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
linux-trace-kernel@...r.kernel.org
Subject: [RFC PATCH 12/12] mm/damon/core: add trace point for damos stat per apply interval
DAMON users can read DAMOS stats via DAMON sysfs interface. It enables
efficient, simple and flexible usages of the stats. Especially for
systems not having advanced tools like perf or bpftrace, that can be
useful. But if the advanced tools are available, exposing the stats via
tracepoint can avoid unnecessary reinvention of the wheel. Add a
new tracepoint for DAMOS stats, namely damos_stat_after_apply_interval.
The tracepoint is triggered for each scheme's apply interval and exposes
the whole stat values. If the user needs sub-apply-interval information
for any reason, the damos_before_apply tracepoint could be used.
Signed-off-by: SeongJae Park <sj@...nel.org>
---
include/trace/events/damon.h | 41 ++++++++++++++++++++++++++++++++++++
mm/damon/core.c | 15 +++++++++++++
2 files changed, 56 insertions(+)
diff --git a/include/trace/events/damon.h b/include/trace/events/damon.h
index 852d725afea2..24fc402ab3c8 100644
--- a/include/trace/events/damon.h
+++ b/include/trace/events/damon.h
@@ -9,6 +9,47 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
+TRACE_EVENT(damos_stat_after_apply_interval,
+
+ TP_PROTO(unsigned int context_idx, unsigned int scheme_idx,
+ struct damos_stat *stat),
+
+ TP_ARGS(context_idx, scheme_idx, stat),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, context_idx)
+ __field(unsigned int, scheme_idx)
+ __field(unsigned long, nr_tried)
+ __field(unsigned long, sz_tried)
+ __field(unsigned long, nr_applied)
+ __field(unsigned long, sz_applied)
+ __field(unsigned long, sz_ops_filter_passed)
+ __field(unsigned long, qt_exceeds)
+ __field(unsigned long, nr_snapshots)
+ ),
+
+ TP_fast_assign(
+ __entry->context_idx = context_idx;
+ __entry->scheme_idx = scheme_idx;
+ __entry->nr_tried = stat->nr_tried;
+ __entry->sz_tried = stat->sz_tried;
+ __entry->nr_applied = stat->nr_applied;
+ __entry->sz_applied = stat->sz_applied;
+ __entry->sz_ops_filter_passed = stat->sz_ops_filter_passed;
+ __entry->qt_exceeds = stat->qt_exceeds;
+ __entry->nr_snapshots = stat->nr_snapshots;
+ ),
+
+ TP_printk("ctx_idx=%u scheme_idx=%u nr_tried=%lu sz_tried=%lu "
+ "nr_applied=%lu sz_applied=%lu sz_ops_filter_passed=%lu "
+ "qt_exceeds=%lu nr_snapshots=%lu",
+ __entry->context_idx, __entry->scheme_idx,
+ __entry->nr_tried, __entry->sz_tried,
+ __entry->nr_applied, __entry->sz_applied,
+ __entry->sz_ops_filter_passed, __entry->qt_exceeds,
+ __entry->nr_snapshots)
+);
+
TRACE_EVENT(damos_esz,
TP_PROTO(unsigned int context_idx, unsigned int scheme_idx,
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 36313cd1ff1c..2f212a18e4a0 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -2256,6 +2256,19 @@ static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
quota->min_score = score;
}
+/*
+ * Emit the damos_stat_after_apply_interval tracepoint for scheme s of
+ * context c. sidx is found by walking c's scheme list until s is
+ * reached, so it is s's position within the context.
+ *
+ * NOTE(review): cidx is hardcoded to 0 and never looked up, unlike
+ * sidx — presumably because each kdamond here operates on a single
+ * context; confirm whether a context-index walk (as done for schemes)
+ * is needed for multi-context kdamonds.
+ */
+static void damos_trace_stat(struct damon_ctx *c, struct damos *s)
+{
+ unsigned int cidx = 0, sidx = 0;
+ struct damos *siter;
+
+ damon_for_each_scheme(siter, c) {
+ if (siter == s)
+ break;
+ sidx++;
+ }
+ trace_damos_stat_after_apply_interval(cidx, sidx, &s->stat);
+}
+
static void kdamond_apply_schemes(struct damon_ctx *c)
{
struct damon_target *t;
@@ -2294,6 +2307,8 @@ static void kdamond_apply_schemes(struct damon_ctx *c)
(s->apply_interval_us ? s->apply_interval_us :
c->attrs.aggr_interval) / sample_interval;
s->last_applied = NULL;
+ if (trace_damos_stat_after_apply_interval_enabled())
+ damos_trace_stat(c, s);
}
mutex_unlock(&c->walk_control_lock);
}
--
2.47.3
Powered by blists - more mailing lists