Message-ID: <ebcd40f1-9bc6-4ae4-8b12-deedf9b8b450@arm.com>
Date: Fri, 12 Apr 2024 10:43:23 +0100
From: Ryan Roberts <ryan.roberts@....com>
To: Barry Song <21cnbao@...il.com>, akpm@...ux-foundation.org,
linux-mm@...ck.org
Cc: cerasuolodomenico@...il.com, chrisl@...nel.org, david@...hat.com,
kasong@...cent.com, linux-kernel@...r.kernel.org, peterx@...hat.com,
surenb@...gle.com, v-songbaohua@...o.com, willy@...radead.org,
yosryahmed@...gle.com, yuzhao@...gle.com, corbet@....net
Subject: Re: [PATCH v5 2/4] mm: add per-order mTHP anon_swpout and
anon_swpout_fallback counters
On 12/04/2024 08:37, Barry Song wrote:
> From: Barry Song <v-songbaohua@...o.com>
>
> These counters help to show how fragmented the swapfile is, by exposing
> the proportion of large folios that were swapped out without being split.
> So far, non-split swapout is only supported for anon memory, with the
> possibility of expanding to shmem in the future, so we add the "anon"
> prefix to the counter names.
>
> Signed-off-by: Barry Song <v-songbaohua@...o.com>
> Cc: Chris Li <chrisl@...nel.org>
> Cc: David Hildenbrand <david@...hat.com>
> Cc: Domenico Cerasuolo <cerasuolodomenico@...il.com>
> Cc: Kairui Song <kasong@...cent.com>
> Cc: Matthew Wilcox (Oracle) <willy@...radead.org>
> Cc: Peter Xu <peterx@...hat.com>
> Cc: Ryan Roberts <ryan.roberts@....com>
> Cc: Suren Baghdasaryan <surenb@...gle.com>
> Cc: Yosry Ahmed <yosryahmed@...gle.com>
> Cc: Yu Zhao <yuzhao@...gle.com>
LGTM!
Reviewed-by: Ryan Roberts <ryan.roberts@....com>
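
For anyone picking this patch up without the rest of the series:
count_mthp_stat() comes from patch 1/4. If I remember its shape correctly,
it is just a per-CPU, per-order array bump, roughly:

	struct mthp_stat {
		unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
	};

	DEFINE_PER_CPU(struct mthp_stat, mthp_stats) = {{{0}}};

	static inline void count_mthp_stat(int order, enum mthp_stat_item item)
	{
		/* Only mTHP orders (1..PMD_ORDER) are tracked */
		if (order <= 0 || order > PMD_ORDER)
			return;

		this_cpu_inc(mthp_stats.stats[order][item]);
	}

So adding the two new items before __MTHP_STAT_COUNT below is all the
storage side needs; the sysfs plumbing is the DEFINE_MTHP_STAT_ATTR and
stats_attrs hunks.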
> ---
> include/linux/huge_mm.h | 2 ++
> mm/huge_memory.c | 4 ++++
> mm/page_io.c | 1 +
> mm/vmscan.c | 3 +++
> 4 files changed, 10 insertions(+)
>
> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
> index c5beb54b97cb..b69c3b3e1436 100644
> --- a/include/linux/huge_mm.h
> +++ b/include/linux/huge_mm.h
> @@ -268,6 +268,8 @@ enum mthp_stat_item {
> MTHP_STAT_ANON_FAULT_ALLOC,
> MTHP_STAT_ANON_FAULT_FALLBACK,
> MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
> + MTHP_STAT_ANON_SWPOUT,
> + MTHP_STAT_ANON_SWPOUT_FALLBACK,
> __MTHP_STAT_COUNT
> };
>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 21c4ac74b484..13e74724d0c3 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -557,11 +557,15 @@ static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
> DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC);
> DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
> DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
> +DEFINE_MTHP_STAT_ATTR(anon_swpout, MTHP_STAT_ANON_SWPOUT);
> +DEFINE_MTHP_STAT_ATTR(anon_swpout_fallback, MTHP_STAT_ANON_SWPOUT_FALLBACK);
>
> static struct attribute *stats_attrs[] = {
> &anon_fault_alloc_attr.attr,
> &anon_fault_fallback_attr.attr,
> &anon_fault_fallback_charge_attr.attr,
> + &anon_swpout_attr.attr,
> + &anon_swpout_fallback_attr.attr,
> NULL,
> };
>
> diff --git a/mm/page_io.c b/mm/page_io.c
> index a9a7c236aecc..46c603dddf04 100644
> --- a/mm/page_io.c
> +++ b/mm/page_io.c
> @@ -217,6 +217,7 @@ static inline void count_swpout_vm_event(struct folio *folio)
> count_memcg_folio_events(folio, THP_SWPOUT, 1);
> count_vm_event(THP_SWPOUT);
> }
> + count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_SWPOUT);
> #endif
> count_vm_events(PSWPOUT, folio_nr_pages(folio));
> }
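
Worth spelling out for readers: the new count sits outside the
folio_test_pmd_mappable() check, so the legacy THP_SWPOUT event stays
PMD-only while the per-order counter fires for every large folio.
Reconstructing the function with this hunk applied (from the context
lines, so double-check against the tree):

	static inline void count_swpout_vm_event(struct folio *folio)
	{
	#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		if (unlikely(folio_test_pmd_mappable(folio))) {
			count_memcg_folio_events(folio, THP_SWPOUT, 1);
			count_vm_event(THP_SWPOUT);
		}
		/* new: counted for every large folio order, not just PMD size */
		count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_SWPOUT);
	#endif
		count_vm_events(PSWPOUT, folio_nr_pages(folio));
	}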
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index bca2d9981c95..49bd94423961 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -1231,6 +1231,8 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
> goto activate_locked;
> }
> if (!add_to_swap(folio)) {
> + int __maybe_unused order = folio_order(folio);
> +
> if (!folio_test_large(folio))
> goto activate_locked_split;
> /* Fallback to swap normal pages */
> @@ -1242,6 +1244,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
> THP_SWPOUT_FALLBACK, 1);
> count_vm_event(THP_SWPOUT_FALLBACK);
> }
> + count_mthp_stat(order, MTHP_STAT_ANON_SWPOUT_FALLBACK);
> #endif
> if (!add_to_swap(folio))
> goto activate_locked_split;
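
Two closing notes. First, the reason order is snapshotted up front in the
fallback path: by the time we reach the count, the large folio has already
been split to order-0 pages, so folio_order() at the count site would
always return 0; saving it early keeps the fallback accounted against the
original order.

Second, once this lands the counters are readable from sysfs. A quick
throwaway reader (the stats path is from patch 1/4 as I remember it, and
hugepages-64kB is just an example order; adjust to taste):

	#include <stdio.h>

	/* Read a single decimal counter from a sysfs file; -1 on error. */
	static long read_counter(const char *path)
	{
		FILE *f = fopen(path, "r");
		long val = -1;

		if (!f)
			return -1;
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
		return val;
	}

	int main(void)
	{
		const char *base =
			"/sys/kernel/mm/transparent_hugepage/hugepages-64kB/stats";
		char path[256];
		long swpout, fallback;

		snprintf(path, sizeof(path), "%s/anon_swpout", base);
		swpout = read_counter(path);
		snprintf(path, sizeof(path), "%s/anon_swpout_fallback", base);
		fallback = read_counter(path);

		printf("anon_swpout: %ld anon_swpout_fallback: %ld\n",
		       swpout, fallback);
		if (swpout >= 0 && fallback >= 0 && swpout + fallback > 0)
			printf("swapped out without splitting: %.1f%%\n",
			       100.0 * swpout / (swpout + fallback));
		return 0;
	}

anon_swpout / (anon_swpout + anon_swpout_fallback) is then exactly the
"proportion we haven't split" signal the commit message is after.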