Message-ID: <20190314101915.GI7473@dhcp22.suse.cz>
Date: Thu, 14 Mar 2019 11:19:15 +0100
From: Michal Hocko <mhocko@...nel.org>
To: Yafang Shao <laoar.shao@...il.com>
Cc: vbabka@...e.cz, jrdr.linux@...il.com, akpm@...ux-foundation.org,
linux-mm@...ck.org, linux-kernel@...r.kernel.org,
shaoyafang@...iglobal.com
Subject: Re: [PATCH] mm: vmscan: drop may_writepage and classzone_idx from
direct reclaim begin template
On Fri 01-03-19 14:24:12, Yafang Shao wrote:
> There are three tracepoints using this template, which are
> mm_vmscan_direct_reclaim_begin,
> mm_vmscan_memcg_reclaim_begin,
> mm_vmscan_memcg_softlimit_reclaim_begin.
>
> Regarding mm_vmscan_direct_reclaim_begin,
> sc.may_writepage is !laptop_mode, which is a static setting, and
> reclaim_idx is derived from gfp_mask, which is already shown in this
> tracepoint.
>
> Regarding mm_vmscan_memcg_reclaim_begin,
> may_writepage is !laptop_mode as well, and reclaim_idx is (MAX_NR_ZONES-1),
> which are both static values.
>
> mm_vmscan_memcg_softlimit_reclaim_begin is the same as
> mm_vmscan_memcg_reclaim_begin.
>
> So we can drop them all.
I agree. Although classzone_idx is a PITA to calculate, nothing really
prevents us from having a tool to do that. may_writepage is not all that
useful anymore.
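
For the classzone_idx part, a tool could mirror gfp_zone() in userspace
and recover the index from the gfp_flags string the tracepoint already
prints. A rough, untested sketch (it assumes a config with ZONE_DMA and
ZONE_DMA32 but no HIGHMEM, and that the individual __GFP_* zone bits are
visible in the string; a real tool would also have to expand compound
names like GFP_HIGHUSER_MOVABLE):

#include <stdio.h>
#include <string.h>

/* Zone numbering assumed for a DMA+DMA32, no-HIGHMEM config. */
enum zone_type { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_MOVABLE };

/* Mirror gfp_zone(): pick the zone from the zone modifier bits. */
static enum zone_type classzone_from_flags(const char *gfp_flags)
{
	int dma32   = strstr(gfp_flags, "__GFP_DMA32") != NULL;
	int dma     = !dma32 && strstr(gfp_flags, "__GFP_DMA") != NULL;
	int highmem = strstr(gfp_flags, "__GFP_HIGHMEM") != NULL;
	int movable = strstr(gfp_flags, "__GFP_MOVABLE") != NULL;

	if (movable && highmem)
		return ZONE_MOVABLE;
	if (dma)
		return ZONE_DMA;
	if (dma32)
		return ZONE_DMA32;
	return ZONE_NORMAL;
}

int main(void)
{
	const char *flags = "__GFP_HIGHMEM|__GFP_MOVABLE|__GFP_IO|__GFP_FS";

	printf("classzone_idx=%d\n", classzone_from_flags(flags));
	return 0;
}
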
> Signed-off-by: Yafang Shao <laoar.shao@...il.com>
From a quick glance this looks ok. I haven't really checked deeply or
tried to compile it, but the change makes sense.
Acked-by: Michal Hocko <mhocko@...e.com>
> ---
> include/trace/events/vmscan.h | 26 ++++++++++----------------
> mm/vmscan.c | 14 +++-----------
> 2 files changed, 13 insertions(+), 27 deletions(-)
>
> diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
> index a1cb913..153d90c 100644
> --- a/include/trace/events/vmscan.h
> +++ b/include/trace/events/vmscan.h
> @@ -105,51 +105,45 @@
>
> DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_begin_template,
>
> - TP_PROTO(int order, int may_writepage, gfp_t gfp_flags, int classzone_idx),
> + TP_PROTO(int order, gfp_t gfp_flags),
>
> - TP_ARGS(order, may_writepage, gfp_flags, classzone_idx),
> + TP_ARGS(order, gfp_flags),
>
> TP_STRUCT__entry(
> __field( int, order )
> - __field( int, may_writepage )
> __field( gfp_t, gfp_flags )
> - __field( int, classzone_idx )
> ),
>
> TP_fast_assign(
> __entry->order = order;
> - __entry->may_writepage = may_writepage;
> __entry->gfp_flags = gfp_flags;
> - __entry->classzone_idx = classzone_idx;
> ),
>
> - TP_printk("order=%d may_writepage=%d gfp_flags=%s classzone_idx=%d",
> + TP_printk("order=%d gfp_flags=%s",
> __entry->order,
> - __entry->may_writepage,
> - show_gfp_flags(__entry->gfp_flags),
> - __entry->classzone_idx)
> + show_gfp_flags(__entry->gfp_flags))
> );
>
> DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_direct_reclaim_begin,
>
> - TP_PROTO(int order, int may_writepage, gfp_t gfp_flags, int classzone_idx),
> + TP_PROTO(int order, gfp_t gfp_flags),
>
> - TP_ARGS(order, may_writepage, gfp_flags, classzone_idx)
> + TP_ARGS(order, gfp_flags)
> );
>
> #ifdef CONFIG_MEMCG
> DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_reclaim_begin,
>
> - TP_PROTO(int order, int may_writepage, gfp_t gfp_flags, int classzone_idx),
> + TP_PROTO(int order, gfp_t gfp_flags),
>
> - TP_ARGS(order, may_writepage, gfp_flags, classzone_idx)
> + TP_ARGS(order, gfp_flags)
> );
>
> DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_softlimit_reclaim_begin,
>
> - TP_PROTO(int order, int may_writepage, gfp_t gfp_flags, int classzone_idx),
> + TP_PROTO(int order, gfp_t gfp_flags),
>
> - TP_ARGS(order, may_writepage, gfp_flags, classzone_idx)
> + TP_ARGS(order, gfp_flags)
> );
> #endif /* CONFIG_MEMCG */
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index ac4806f..cdc0305 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -3304,10 +3304,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
> if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
> return 1;
>
> - trace_mm_vmscan_direct_reclaim_begin(order,
> - sc.may_writepage,
> - sc.gfp_mask,
> - sc.reclaim_idx);
> + trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask);
>
> nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
>
> @@ -3338,9 +3335,7 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
> (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
>
> trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
> - sc.may_writepage,
> - sc.gfp_mask,
> - sc.reclaim_idx);
> + sc.gfp_mask);
>
> /*
> * NOTE: Although we can get the priority field, using it
> @@ -3389,10 +3384,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
>
> zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
>
> - trace_mm_vmscan_memcg_reclaim_begin(0,
> - sc.may_writepage,
> - sc.gfp_mask,
> - sc.reclaim_idx);
> + trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask);
>
> psi_memstall_enter(&pflags);
> noreclaim_flag = memalloc_noreclaim_save();
> --
> 1.8.3.1
>
--
Michal Hocko
SUSE Labs