Message-ID: <20231123193937.11628-2-ddrokosov@salutedevices.com>
Date: Thu, 23 Nov 2023 22:39:36 +0300
From: Dmitry Rokosov <ddrokosov@...utedevices.com>
To: <rostedt@...dmis.org>, <mhiramat@...nel.org>, <hannes@...xchg.org>,
<mhocko@...nel.org>, <roman.gushchin@...ux.dev>,
<shakeelb@...gle.com>, <muchun.song@...ux.dev>, <mhocko@...e.com>,
<akpm@...ux-foundation.org>
CC: <kernel@...rdevices.ru>, <rockosov@...il.com>,
<cgroups@...r.kernel.org>, <linux-mm@...ck.org>,
<linux-kernel@...r.kernel.org>, <bpf@...r.kernel.org>,
Dmitry Rokosov <ddrokosov@...utedevices.com>
Subject: [PATCH v3 1/2] mm: memcg: print out cgroup ino in the memcg tracepoints

Sometimes it is necessary to identify which memory cgroup a memcg
tracepoint event belongs to. This is particularly relevant for large
cgroup hierarchies, where users may want to trace reclaim in one
specific cgroup, or in a set of cgroups, by applying a tracepoint
filter. The cgroup inode number returned by cgroup_ino() is well
suited for this purpose.

To make cgroup_ino() available in the existing memcg tracepoints,
introduce a new tracepoint template for the begin() and end() events
which takes the mem_cgroup pointer and reports the inode number of its
cgroup.
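
An illustrative usage sketch (not part of this patch): with the new
'ino' field in place, reclaim activity of a single cgroup can be
singled out through the usual tracefs filter interface. The cgroup
path and the inode number 22 below are made-up example values:

  stat -c %i /sys/fs/cgroup/mygroup   # look up the cgroup inode number
  echo 'ino == 22' > /sys/kernel/tracing/events/vmscan/mm_vmscan_memcg_reclaim_begin/filter
  echo 1 > /sys/kernel/tracing/events/vmscan/mm_vmscan_memcg_reclaim_begin/enable
  cat /sys/kernel/tracing/trace_pipe

The same filter can be applied to the corresponding _end events, which
carry the same 'ino' field.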
Signed-off-by: Dmitry Rokosov <ddrokosov@...utedevices.com>
---
include/trace/events/vmscan.h | 73 +++++++++++++++++++++++++++++------
mm/vmscan.c | 10 ++---
2 files changed, 66 insertions(+), 17 deletions(-)
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index d2123dd960d5..e9093fa1c924 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -141,19 +141,45 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_direct_reclaim_b
);
#ifdef CONFIG_MEMCG
-DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_reclaim_begin,
- TP_PROTO(int order, gfp_t gfp_flags),
+DECLARE_EVENT_CLASS(mm_vmscan_memcg_reclaim_begin_template,
- TP_ARGS(order, gfp_flags)
+ TP_PROTO(int order, gfp_t gfp_flags, const struct mem_cgroup *memcg),
+
+ TP_ARGS(order, gfp_flags, memcg),
+
+ TP_STRUCT__entry(
+ __field(int, order)
+ __field(unsigned long, gfp_flags)
+ __field(ino_t, ino)
+ ),
+
+ TP_fast_assign(
+ __entry->order = order;
+ __entry->gfp_flags = (__force unsigned long)gfp_flags;
+ __entry->ino = cgroup_ino(memcg->css.cgroup);
+ ),
+
+ TP_printk("order=%d gfp_flags=%s memcg=%ld",
+ __entry->order,
+ show_gfp_flags(__entry->gfp_flags),
+ __entry->ino)
);
-DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_softlimit_reclaim_begin,
+DEFINE_EVENT(mm_vmscan_memcg_reclaim_begin_template, mm_vmscan_memcg_reclaim_begin,
- TP_PROTO(int order, gfp_t gfp_flags),
+ TP_PROTO(int order, gfp_t gfp_flags, const struct mem_cgroup *memcg),
- TP_ARGS(order, gfp_flags)
+ TP_ARGS(order, gfp_flags, memcg)
+);
+
+DEFINE_EVENT(mm_vmscan_memcg_reclaim_begin_template, mm_vmscan_memcg_softlimit_reclaim_begin,
+
+ TP_PROTO(int order, gfp_t gfp_flags, const struct mem_cgroup *memcg),
+
+ TP_ARGS(order, gfp_flags, memcg)
);
+
#endif /* CONFIG_MEMCG */
DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_end_template,
@@ -181,19 +207,42 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_direct_reclaim_end
);
#ifdef CONFIG_MEMCG
-DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_reclaim_end,
- TP_PROTO(unsigned long nr_reclaimed),
+DECLARE_EVENT_CLASS(mm_vmscan_memcg_reclaim_end_template,
- TP_ARGS(nr_reclaimed)
+ TP_PROTO(unsigned long nr_reclaimed, const struct mem_cgroup *memcg),
+
+ TP_ARGS(nr_reclaimed, memcg),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, nr_reclaimed)
+ __field(ino_t, ino)
+ ),
+
+ TP_fast_assign(
+ __entry->nr_reclaimed = nr_reclaimed;
+ __entry->ino = cgroup_ino(memcg->css.cgroup);
+ ),
+
+ TP_printk("nr_reclaimed=%lu memcg=%ld",
+ __entry->nr_reclaimed,
+ __entry->ino)
);
-DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_softlimit_reclaim_end,
+DEFINE_EVENT(mm_vmscan_memcg_reclaim_end_template, mm_vmscan_memcg_reclaim_end,
- TP_PROTO(unsigned long nr_reclaimed),
+ TP_PROTO(unsigned long nr_reclaimed, const struct mem_cgroup *memcg),
- TP_ARGS(nr_reclaimed)
+ TP_ARGS(nr_reclaimed, memcg)
);
+
+DEFINE_EVENT(mm_vmscan_memcg_reclaim_end_template, mm_vmscan_memcg_softlimit_reclaim_end,
+
+ TP_PROTO(unsigned long nr_reclaimed, const struct mem_cgroup *memcg),
+
+ TP_ARGS(nr_reclaimed, memcg)
+);
+
#endif /* CONFIG_MEMCG */
TRACE_EVENT(mm_shrink_slab_start,
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1080209a568b..45780952f4b5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -7088,8 +7088,8 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
- trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
- sc.gfp_mask);
+ trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order, sc.gfp_mask,
+ memcg);
/*
* NOTE: Although we can get the priority field, using it
@@ -7100,7 +7100,7 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
*/
shrink_lruvec(lruvec, &sc);
- trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
+ trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed, memcg);
*nr_scanned = sc.nr_scanned;
@@ -7134,13 +7134,13 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
set_task_reclaim_state(current, &sc.reclaim_state);
- trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask);
+ trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask, memcg);
noreclaim_flag = memalloc_noreclaim_save();
nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
memalloc_noreclaim_restore(noreclaim_flag);
- trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
+ trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed, memcg);
set_task_reclaim_state(current, NULL);
return nr_reclaimed;
--
2.36.0