Message-Id: <20100726120325.2EF4.A69D9226@jp.fujitsu.com>
Date: Mon, 26 Jul 2010 12:04:13 +0900 (JST)
From: KOSAKI Motohiro <kosaki.motohiro@...fujitsu.com>
To: KOSAKI Motohiro <kosaki.motohiro@...fujitsu.com>
Cc: kosaki.motohiro@...fujitsu.com,
LKML <linux-kernel@...r.kernel.org>,
linux-mm <linux-mm@...ck.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Mel Gorman <mel@....ul.ie>,
KAMEZAWA Hiroyuki <kamezawa.hiroyu@...fujitsu.com>,
Nishimura Daisuke <d-nishimura@....biglobe.ne.jp>,
Balbir Singh <balbir@...ux.vnet.ibm.com>
Subject: [PATCH 2/4] memcg, vmscan: add memcg reclaim tracepoint
Memcg also needs to trace reclaim progress, just as direct reclaim does. This
patch adds the corresponding tracepoints.
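
The new events can be enabled like any other vmscan tracepoint through the
usual ftrace interface. A usage sketch (paths assume debugfs/tracefs is
mounted at /sys/kernel/debug):

  echo 1 > /sys/kernel/debug/tracing/events/vmscan/mm_vmscan_memcg_reclaim_begin/enable
  echo 1 > /sys/kernel/debug/tracing/events/vmscan/mm_vmscan_memcg_reclaim_end/enable
  echo 1 > /sys/kernel/debug/tracing/events/vmscan/mm_vmscan_memcg_softlimit_reclaim_begin/enable
  echo 1 > /sys/kernel/debug/tracing/events/vmscan/mm_vmscan_memcg_softlimit_reclaim_end/enable
  cat /sys/kernel/debug/tracing/trace_pipe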
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@...fujitsu.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@...fujitsu.com>
Acked-by: Mel Gorman <mel@....ul.ie>
Acked-by: Balbir Singh <balbir@...ux.vnet.ibm.com>
---
include/trace/events/vmscan.h | 28 ++++++++++++++++++++++++++++
mm/vmscan.c | 20 +++++++++++++++++++-
2 files changed, 47 insertions(+), 1 deletions(-)
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index a601763..c35e905 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -99,6 +99,19 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_direct_reclaim_b
TP_ARGS(order, may_writepage, gfp_flags)
);
+DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_reclaim_begin,
+
+ TP_PROTO(int order, int may_writepage, gfp_t gfp_flags),
+
+ TP_ARGS(order, may_writepage, gfp_flags)
+);
+
+DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_softlimit_reclaim_begin,
+
+ TP_PROTO(int order, int may_writepage, gfp_t gfp_flags),
+
+ TP_ARGS(order, may_writepage, gfp_flags)
+);
DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_end_template,
@@ -124,6 +137,21 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_direct_reclaim_end
TP_ARGS(nr_reclaimed)
);
+DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_reclaim_end,
+
+ TP_PROTO(unsigned long nr_reclaimed),
+
+ TP_ARGS(nr_reclaimed)
+);
+
+DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_softlimit_reclaim_end,
+
+ TP_PROTO(unsigned long nr_reclaimed),
+
+ TP_ARGS(nr_reclaimed)
+);
+
+
TRACE_EVENT(mm_vmscan_lru_isolate,
TP_PROTO(int order,
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 97170eb..c691967 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1949,6 +1949,11 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
sc.nodemask = &nm;
sc.nr_reclaimed = 0;
sc.nr_scanned = 0;
+
+ trace_mm_vmscan_memcg_softlimit_reclaim_begin(0,
+ sc.may_writepage,
+ sc.gfp_mask);
+
/*
* NOTE: Although we can get the priority field, using it
* here is not a good idea, since it limits the pages we can scan.
@@ -1957,6 +1962,9 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
* the priority and make it zero.
*/
shrink_zone(0, zone, &sc);
+
+ trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
+
return sc.nr_reclaimed;
}
@@ -1966,6 +1974,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
unsigned int swappiness)
{
struct zonelist *zonelist;
+ unsigned long nr_reclaimed;
struct scan_control sc = {
.may_writepage = !laptop_mode,
.may_unmap = 1,
@@ -1980,7 +1989,16 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
zonelist = NODE_DATA(numa_node_id())->node_zonelists;
- return do_try_to_free_pages(zonelist, &sc);
+
+ trace_mm_vmscan_memcg_reclaim_begin(0,
+ sc.may_writepage,
+ sc.gfp_mask);
+
+ nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+
+ trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
+
+ return nr_reclaimed;
}
#endif
--
1.6.5.2