Message-ID: <CAJD7tkbnZHCPu0Bqs2xNyP6FQviuq6kGSQa840H+sVhiPEbYpA@mail.gmail.com>
Date: Fri, 20 Sep 2024 15:57:31 -0700
From: Yosry Ahmed <yosryahmed@...gle.com>
To: Kanchana P Sridhar <kanchana.p.sridhar@...el.com>
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org, hannes@...xchg.org,
nphamcs@...il.com, chengming.zhou@...ux.dev, usamaarif642@...il.com,
ryan.roberts@....com, ying.huang@...el.com, 21cnbao@...il.com,
akpm@...ux-foundation.org, nanhai.zou@...el.com, wajdi.k.feghali@...el.com,
vinodh.gopal@...el.com
Subject: Re: [PATCH v6 3/3] mm: swap: Count successful mTHP ZSWAP stores in
sysfs mTHP zswpout stats.
On Thu, Aug 29, 2024 at 2:27 PM Kanchana P Sridhar
<kanchana.p.sridhar@...el.com> wrote:
>
> Add a new MTHP_STAT_ZSWPOUT entry to the sysfs mTHP stats so that
> per-order mTHP folio ZSWAP stores can be accounted.
>
> If zswap_store() successfully swaps out an mTHP, it will be counted under
> the per-order sysfs "zswpout" stats:
>
> /sys/kernel/mm/transparent_hugepage/hugepages-*kB/stats/zswpout
>
> Other block dev/fs mTHP swap-out events will be counted under
> the existing sysfs "swpout" stats:
>
> /sys/kernel/mm/transparent_hugepage/hugepages-*kB/stats/swpout
>
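Not a comment on the patch itself, but for anyone trying this out: assuming a
4K-page kernel with the 64kB mTHP size enabled, a successful zswap store of a
64kB folio would bump the per-size counter, readable with e.g.:

  # cat /sys/kernel/mm/transparent_hugepage/hugepages-64kB/stats/zswpout

while folios that fall through to the swap device keep incrementing the
existing "swpout" file in the same directory.
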
> Based on changes made in commit 61e751c01466ffef5dc72cb64349454a691c6bfe
> ("mm: cleanup count_mthp_stat() definition"), this patch also moves
> the call to count_mthp_stat() in count_swpout_vm_event() to be outside
> the "ifdef CONFIG_TRANSPARENT_HUGEPAGE".
This should be a separate patch; it's unrelated to the addition of
MTHP_STAT_ZSWPOUT.
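
For context: since commit 61e751c01466 ("mm: cleanup count_mthp_stat()
definition"), count_mthp_stat() is defined for both configurations, roughly
along these lines (a simplified sketch, not the exact huge_mm.h code):

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void count_mthp_stat(int order, enum mthp_stat_item item)
{
	if (order <= 0 || order > PMD_ORDER)
		return;

	this_cpu_inc(mthp_stats.stats[order][item]);
}
#else
static inline void count_mthp_stat(int order, enum mthp_stat_item item)
{
	/* no-op stub when CONFIG_TRANSPARENT_HUGEPAGE=n */
}
#endif

So hoisting the call out of the ifdef in count_swpout_vm_event() is a no-op
for !THP builds, but it is still a cleanup that should stand on its own.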
>
> Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@...el.com>
> ---
>  include/linux/huge_mm.h | 1 +
>  mm/huge_memory.c        | 3 +++
>  mm/page_io.c            | 3 ++-
>  3 files changed, 6 insertions(+), 1 deletion(-)
>
> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
> index 4da102b74a8c..8b690328e78b 100644
> --- a/include/linux/huge_mm.h
> +++ b/include/linux/huge_mm.h
> @@ -118,6 +118,7 @@ enum mthp_stat_item {
>  	MTHP_STAT_ANON_FAULT_ALLOC,
>  	MTHP_STAT_ANON_FAULT_FALLBACK,
>  	MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
> +	MTHP_STAT_ZSWPOUT,
>  	MTHP_STAT_SWPOUT,
>  	MTHP_STAT_SWPOUT_FALLBACK,
>  	MTHP_STAT_SHMEM_ALLOC,
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 15418ffdd377..ad921c4b2ad8 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -587,6 +587,7 @@ static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
>  DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC);
>  DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
>  DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
> +DEFINE_MTHP_STAT_ATTR(zswpout, MTHP_STAT_ZSWPOUT);
>  DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT);
>  DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK);
>  #ifdef CONFIG_SHMEM
> @@ -605,6 +606,7 @@ static struct attribute *anon_stats_attrs[] = {
>  	&anon_fault_fallback_attr.attr,
>  	&anon_fault_fallback_charge_attr.attr,
>  #ifndef CONFIG_SHMEM
> +	&zswpout_attr.attr,
>  	&swpout_attr.attr,
>  	&swpout_fallback_attr.attr,
>  #endif
> @@ -637,6 +639,7 @@ static struct attribute_group file_stats_attr_grp = {
>
>  static struct attribute *any_stats_attrs[] = {
>  #ifdef CONFIG_SHMEM
> +	&zswpout_attr.attr,
>  	&swpout_attr.attr,
>  	&swpout_fallback_attr.attr,
>  #endif
> diff --git a/mm/page_io.c b/mm/page_io.c
> index b6f1519d63b0..26106e745d73 100644
> --- a/mm/page_io.c
> +++ b/mm/page_io.c
> @@ -289,6 +289,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
>  		swap_zeromap_folio_clear(folio);
>  	}
>  	if (zswap_store(folio)) {
> +		count_mthp_stat(folio_order(folio), MTHP_STAT_ZSWPOUT);
>  		folio_unlock(folio);
>  		return 0;
>  	}
> @@ -308,8 +309,8 @@ static inline void count_swpout_vm_event(struct folio *folio)
>  		count_memcg_folio_events(folio, THP_SWPOUT, 1);
>  		count_vm_event(THP_SWPOUT);
>  	}
> -	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPOUT);
>  #endif
> +	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPOUT);
>  	count_vm_events(PSWPOUT, folio_nr_pages(folio));
>  }
>
> --
> 2.27.0
>