Message-Id: <20240104100527.3908-2-cuibixuan@vivo.com>
Date: Thu, 4 Jan 2024 02:05:26 -0800
From: Bixuan Cui <cuibixuan@...o.com>
To: akpm@...ux-foundation.org,
rostedt@...dmis.org,
mhiramat@...nel.org,
mathieu.desnoyers@...icios.com
Cc: linux-kernel@...r.kernel.org,
linux-trace-kernel@...r.kernel.org,
linux-mm@...ck.org,
opensource.kernel@...o.com,
cuibixuan@...o.com
Subject: [PATCH -next v5 1/2] mm: shrinker: add new event to trace shrink count
From: cuibixuan <cuibixuan@...o.com>
do_shrink_slab() first calculates the number of freeable objects via
shrinker->count_objects() and then reclaims them via
shrinker->scan_objects(). During reclaim, the count_objects() step itself
can take a significant amount of time:

  Function           time spent (us)
  ext4_es_count      4302
  ext4_es_scan       12
  super_cache_count  4195
  super_cache_scan   2103

Therefore, add trace events around count_objects() so that the time spent
on slab memory reclaim can be measured more accurately.

Example of output:
kswapd0-103 [003] ..... 1098.317942: mm_shrink_count_start: kfree_rcu_shrink_count.cfi_jt+0x0/0x8 00000000c540ff51: nid: 0
kswapd0-103 [003] ..... 1098.317951: mm_shrink_count_end: kfree_rcu_shrink_count.cfi_jt+0x0/0x8 00000000c540ff51: nid: 0 freeable:36
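
For context, the callback being timed here is the standard shrinker
->count_objects() hook, which reports how many objects could be freed
before ->scan_objects() does the actual freeing. Below is a minimal,
hypothetical sketch (not part of this patch; the my_cache_* names are
made up and it assumes the shrinker_alloc()/shrinker_register()
interface) of a shrinker whose count_objects() call would be bracketed
by the new events:

	/*
	 * Hypothetical example only: a minimal shrinker whose
	 * ->count_objects() call would be traced by
	 * mm_shrink_count_start/mm_shrink_count_end.
	 */
	#include <linux/atomic.h>
	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/shrinker.h>

	static atomic_long_t my_cache_nr_objects;	/* objects currently cached */

	static unsigned long my_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc)
	{
		/* This is the call now timed by the new count events. */
		return atomic_long_read(&my_cache_nr_objects);
	}

	static unsigned long my_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc)
	{
		unsigned long nr = atomic_long_read(&my_cache_nr_objects);
		unsigned long freed = nr < sc->nr_to_scan ? nr : sc->nr_to_scan;

		/* Drop up to sc->nr_to_scan objects and report how many went away. */
		atomic_long_sub(freed, &my_cache_nr_objects);
		return freed ? freed : SHRINK_STOP;
	}

	static int __init my_cache_shrinker_init(void)
	{
		struct shrinker *s = shrinker_alloc(0, "my-cache");

		if (!s)
			return -ENOMEM;

		s->count_objects = my_cache_count;
		s->scan_objects = my_cache_scan;
		shrinker_register(s);
		return 0;
	}

Since the events are defined in include/trace/events/vmscan.h, they
should show up under the vmscan trace system once the patch is applied,
e.g. /sys/kernel/tracing/events/vmscan/mm_shrink_count_start/enable,
alongside the existing mm_shrink_slab_* events.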
Signed-off-by: Bixuan Cui <cuibixuan@...o.com>
Reviewed-by: Steven Rostedt <rostedt@...dmis.org>
---
Changes:
v5: * Use 'DECLARE_EVENT_CLASS(mm_vmscan_lru_shrink_start_template' to
replace 'TRACE_EVENT(mm_vmscan_lru_shrink_inactive/active_start'
* Add the explanation for adding new shrink lru events into 'mm: vmscan: add new event to trace shrink lru'
v4: * Add Reviewed-by and Changelog to every patch.
v3: * Swap the positions of 'nid' and 'freeable' to prevent the hole in the trace event.
v2: * Modify trace_mm_vmscan_lru_shrink_inactive() in evict_folios() at the same time to fix build error (Andrew pointed out).
include/trace/events/vmscan.h | 49 +++++++++++++++++++++++++++++++++++
mm/shrinker.c | 4 +++
2 files changed, 53 insertions(+)
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index 1a488c30afa5..b99cd28c9815 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -196,6 +196,55 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_softlimit_re
 );
 #endif /* CONFIG_MEMCG */
 
+TRACE_EVENT(mm_shrink_count_start,
+	TP_PROTO(struct shrinker *shr, struct shrink_control *sc),
+
+	TP_ARGS(shr, sc),
+
+	TP_STRUCT__entry(
+		__field(struct shrinker *, shr)
+		__field(void *, shrink)
+		__field(int, nid)
+	),
+
+	TP_fast_assign(
+		__entry->shr = shr;
+		__entry->shrink = shr->count_objects;
+		__entry->nid = sc->nid;
+	),
+
+	TP_printk("%pS %p: nid: %d",
+		__entry->shrink,
+		__entry->shr,
+		__entry->nid)
+);
+
+TRACE_EVENT(mm_shrink_count_end,
+	TP_PROTO(struct shrinker *shr, struct shrink_control *sc, long freeable),
+
+	TP_ARGS(shr, sc, freeable),
+
+	TP_STRUCT__entry(
+		__field(struct shrinker *, shr)
+		__field(void *, shrink)
+		__field(long, freeable)
+		__field(int, nid)
+	),
+
+	TP_fast_assign(
+		__entry->shr = shr;
+		__entry->shrink = shr->count_objects;
+		__entry->freeable = freeable;
+		__entry->nid = sc->nid;
+	),
+
+	TP_printk("%pS %p: nid: %d freeable:%ld",
+		__entry->shrink,
+		__entry->shr,
+		__entry->nid,
+		__entry->freeable)
+);
+
 TRACE_EVENT(mm_shrink_slab_start,
 	TP_PROTO(struct shrinker *shr, struct shrink_control *sc,
 		 long nr_objects_to_shrink, unsigned long cache_items,
diff --git a/mm/shrinker.c b/mm/shrinker.c
index dd91eab43ed3..d0c7bf61db61 100644
--- a/mm/shrinker.c
+++ b/mm/shrinker.c
@@ -379,7 +379,11 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 					  : SHRINK_BATCH;
 	long scanned = 0, next_deferred;
 
+	trace_mm_shrink_count_start(shrinker, shrinkctl);
+
 	freeable = shrinker->count_objects(shrinker, shrinkctl);
+
+	trace_mm_shrink_count_end(shrinker, shrinkctl, freeable);
 	if (freeable == 0 || freeable == SHRINK_EMPTY)
 		return freeable;
 
--
2.17.1