Message-Id: <20231220012123.2787-2-cuibixuan@vivo.com>
Date: Tue, 19 Dec 2023 17:21:22 -0800
From: Bixuan Cui <cuibixuan@...o.com>
To: rostedt@...dmis.org,
mhiramat@...nel.org,
mathieu.desnoyers@...icios.com,
akpm@...ux-foundation.org
Cc: linux-kernel@...r.kernel.org,
linux-trace-kernel@...r.kernel.org,
linux-mm@...ck.org,
cuibixuan@...o.com,
opensource.kernel@...o.com
Subject: [PATCH -next v4 1/2] mm: shrinker: add new event to trace shrink count
From: cuibixuan <cuibixuan@...o.com>

do_shrink_slab() first calculates the number of freeable objects via
shrinker->count_objects() and then reclaims them via
shrinker->scan_objects(). During memory reclaim, shrinker->count_objects()
itself can take a significant amount of time, for example:
  Function              Time spent (us)
  ext4_es_count                    4302
  ext4_es_scan                       12
  super_cache_count                4195
  super_cache_scan                 2103
Therefore, adding a trace event pair around count_objects() allows the time
spent on slab memory reclaim to be measured more accurately.

Example output:
kswapd0-103 [003] ..... 1098.317942: mm_shrink_count_start: kfree_rcu_shrink_count.cfi_jt+0x0/0x8 00000000c540ff51: nid: 0
kswapd0-103 [003] ..... 1098.317951: mm_shrink_count_end: kfree_rcu_shrink_count.cfi_jt+0x0/0x8 00000000c540ff51: nid: 0 freeable:36
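
For illustration only (not part of this patch): a minimal, hypothetical
sketch of a shrinker module whose ->count_objects() callback would be
bracketed by the new mm_shrink_count_start/mm_shrink_count_end events. It
assumes the shrinker_alloc()/shrinker_register() API used by mm/shrinker.c
in this tree; all demo_* names are made up for the example.

  #include <linux/init.h>
  #include <linux/module.h>
  #include <linux/shrinker.h>

  static struct shrinker *demo_shrinker;

  /* The time spent in this callback is what the new event pair measures. */
  static unsigned long demo_count_objects(struct shrinker *shr,
                                          struct shrink_control *sc)
  {
          return 128;     /* pretend 128 objects are freeable */
  }

  static unsigned long demo_scan_objects(struct shrinker *shr,
                                         struct shrink_control *sc)
  {
          return SHRINK_STOP;     /* nothing is actually reclaimed here */
  }

  static int __init demo_shrinker_init(void)
  {
          demo_shrinker = shrinker_alloc(0, "demo-shrinker");
          if (!demo_shrinker)
                  return -ENOMEM;

          demo_shrinker->count_objects = demo_count_objects;
          demo_shrinker->scan_objects = demo_scan_objects;
          shrinker_register(demo_shrinker);
          return 0;
  }

  static void __exit demo_shrinker_exit(void)
  {
          shrinker_free(demo_shrinker);
  }

  module_init(demo_shrinker_init);
  module_exit(demo_shrinker_exit);
  MODULE_LICENSE("GPL");

Like the existing mm_shrink_slab_start/_end events, the new pair lives in
the vmscan trace group, so it can be enabled through the usual tracefs
event interface.
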
Signed-off-by: Bixuan Cui <cuibixuan@...o.com>
Reviewed-by: Steven Rostedt <rostedt@...dmis.org>
---
v4: Add Reviewed-by and changelog to every patch.
v3: Swap the positions of 'nid' and 'freeable' to avoid a hole in the trace event structure.
include/trace/events/vmscan.h | 49 +++++++++++++++++++++++++++++++++++
mm/shrinker.c | 4 +++
2 files changed, 53 insertions(+)
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index 1a488c30afa5..b99cd28c9815 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -196,6 +196,55 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_softlimit_re
);
#endif /* CONFIG_MEMCG */
+TRACE_EVENT(mm_shrink_count_start,
+ TP_PROTO(struct shrinker *shr, struct shrink_control *sc),
+
+ TP_ARGS(shr, sc),
+
+ TP_STRUCT__entry(
+ __field(struct shrinker *, shr)
+ __field(void *, shrink)
+ __field(int, nid)
+ ),
+
+ TP_fast_assign(
+ __entry->shr = shr;
+ __entry->shrink = shr->count_objects;
+ __entry->nid = sc->nid;
+ ),
+
+ TP_printk("%pS %p: nid: %d",
+ __entry->shrink,
+ __entry->shr,
+ __entry->nid)
+);
+
+TRACE_EVENT(mm_shrink_count_end,
+ TP_PROTO(struct shrinker *shr, struct shrink_control *sc, long freeable),
+
+ TP_ARGS(shr, sc, freeable),
+
+ TP_STRUCT__entry(
+ __field(struct shrinker *, shr)
+ __field(void *, shrink)
+ __field(long, freeable)
+ __field(int, nid)
+ ),
+
+ TP_fast_assign(
+ __entry->shr = shr;
+ __entry->shrink = shr->count_objects;
+ __entry->freeable = freeable;
+ __entry->nid = sc->nid;
+ ),
+
+ TP_printk("%pS %p: nid: %d freeable:%ld",
+ __entry->shrink,
+ __entry->shr,
+ __entry->nid,
+ __entry->freeable)
+);
+
TRACE_EVENT(mm_shrink_slab_start,
TP_PROTO(struct shrinker *shr, struct shrink_control *sc,
long nr_objects_to_shrink, unsigned long cache_items,
diff --git a/mm/shrinker.c b/mm/shrinker.c
index dd91eab43ed3..d0c7bf61db61 100644
--- a/mm/shrinker.c
+++ b/mm/shrinker.c
@@ -379,7 +379,11 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
: SHRINK_BATCH;
long scanned = 0, next_deferred;
+ trace_mm_shrink_count_start(shrinker, shrinkctl);
+
freeable = shrinker->count_objects(shrinker, shrinkctl);
+
+ trace_mm_shrink_count_end(shrinker, shrinkctl, freeable);
if (freeable == 0 || freeable == SHRINK_EMPTY)
return freeable;
--
2.17.1