Message-ID: <0037de9e-bd64-4203-960d-bbc31fff611f@linux.alibaba.com>
Date: Fri, 12 Jul 2024 10:31:07 +0800
From: Baolin Wang <baolin.wang@...ux.alibaba.com>
To: Ryan Roberts <ryan.roberts@....com>,
Andrew Morton <akpm@...ux-foundation.org>, Hugh Dickins <hughd@...gle.com>,
Jonathan Corbet <corbet@....net>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
David Hildenbrand <david@...hat.com>, Barry Song <baohua@...nel.org>,
Lance Yang <ioworker0@...il.com>
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org
Subject: Re: [PATCH v1 1/2] mm: Cleanup count_mthp_stat() definition
On 2024/7/11 15:29, Ryan Roberts wrote:
> Let's move count_mthp_stat() so that it's always defined, even when THP
> is disabled. Previously, uses of the function in files such as shmem.c,
> which are compiled even when THP is disabled, required ugly THP
> ifdeffery. With this cleanup, we can remove those ifdefs and the
> function resolves to a nop when THP is disabled.
>
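For readers skimming the thread, the caller-side effect in short. This
mirrors the mm/memory.c hunk further down; the call site itself is taken
from the patch:

	/* Before: every THP-invariant call site carried its own guard. */
	#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC);
	#endif

	/*
	 * After: the call is unconditional. With THP disabled it resolves
	 * to the empty static inline stub, so the compiler elides it.
	 */
	count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC);
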
> I shortly plan to call count_mthp_stat() from more THP-invariant source
> files.
>
> Signed-off-by: Ryan Roberts <ryan.roberts@....com>
LGTM.
Reviewed-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
> ---
> include/linux/huge_mm.h | 70 ++++++++++++++++++++---------------------
> mm/memory.c | 2 --
> mm/shmem.c | 6 ----
> 3 files changed, 35 insertions(+), 43 deletions(-)
>
> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
> index cff002be83eb..cb93b9009ce4 100644
> --- a/include/linux/huge_mm.h
> +++ b/include/linux/huge_mm.h
> @@ -108,6 +108,41 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
> #define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))
> #define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT)
>
> +enum mthp_stat_item {
> + MTHP_STAT_ANON_FAULT_ALLOC,
> + MTHP_STAT_ANON_FAULT_FALLBACK,
> + MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
> + MTHP_STAT_SWPOUT,
> + MTHP_STAT_SWPOUT_FALLBACK,
> + MTHP_STAT_SHMEM_ALLOC,
> + MTHP_STAT_SHMEM_FALLBACK,
> + MTHP_STAT_SHMEM_FALLBACK_CHARGE,
> + MTHP_STAT_SPLIT,
> + MTHP_STAT_SPLIT_FAILED,
> + MTHP_STAT_SPLIT_DEFERRED,
> + __MTHP_STAT_COUNT
> +};
> +
> +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
> +struct mthp_stat {
> + unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
> +};
> +
> +DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
> +
> +static inline void count_mthp_stat(int order, enum mthp_stat_item item)
> +{
> + if (order <= 0 || order > PMD_ORDER)
> + return;
> +
> + this_cpu_inc(mthp_stats.stats[order][item]);
> +}
> +#else
> +static inline void count_mthp_stat(int order, enum mthp_stat_item item)
> +{
> +}
> +#endif
> +
> #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>
> extern unsigned long transparent_hugepage_flags;
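
One note for readers: the 'order <= 0 || order > PMD_ORDER' check is what
keeps the stats[] index in range, since the array only has rows for orders
up to ilog2(MAX_PTRS_PER_PTE). For context, the sysfs reader side folds
these per-CPU counters into a single total; roughly like the sketch below
(written from memory, the real helper lives in mm/huge_memory.c and may
differ in detail):

	static unsigned long sum_mthp_stat(int order, enum mthp_stat_item item)
	{
		unsigned long sum = 0;
		int cpu;

		/* Fold each CPU's counter for this (order, item) pair. */
		for_each_possible_cpu(cpu) {
			struct mthp_stat *this = &per_cpu(mthp_stats, cpu);

			sum += this->stats[order][item];
		}

		return sum;
	}
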
> @@ -263,41 +298,6 @@ struct thpsize {
>
> #define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)
>
> -enum mthp_stat_item {
> - MTHP_STAT_ANON_FAULT_ALLOC,
> - MTHP_STAT_ANON_FAULT_FALLBACK,
> - MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
> - MTHP_STAT_SWPOUT,
> - MTHP_STAT_SWPOUT_FALLBACK,
> - MTHP_STAT_SHMEM_ALLOC,
> - MTHP_STAT_SHMEM_FALLBACK,
> - MTHP_STAT_SHMEM_FALLBACK_CHARGE,
> - MTHP_STAT_SPLIT,
> - MTHP_STAT_SPLIT_FAILED,
> - MTHP_STAT_SPLIT_DEFERRED,
> - __MTHP_STAT_COUNT
> -};
> -
> -struct mthp_stat {
> - unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
> -};
> -
> -#ifdef CONFIG_SYSFS
> -DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
> -
> -static inline void count_mthp_stat(int order, enum mthp_stat_item item)
> -{
> - if (order <= 0 || order > PMD_ORDER)
> - return;
> -
> - this_cpu_inc(mthp_stats.stats[order][item]);
> -}
> -#else
> -static inline void count_mthp_stat(int order, enum mthp_stat_item item)
> -{
> -}
> -#endif
> -
> #define transparent_hugepage_use_zero_page() \
> (transparent_hugepage_flags & \
> (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
> diff --git a/mm/memory.c b/mm/memory.c
> index 802d0d8a40f9..a50fdefb8f0b 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -4597,9 +4597,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
>
> folio_ref_add(folio, nr_pages - 1);
> add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
> -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC);
> -#endif
> folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
> folio_add_lru_vma(folio, vma);
> setpte:
> diff --git a/mm/shmem.c b/mm/shmem.c
> index f24dfbd387ba..fce1343f44e6 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -1776,9 +1776,7 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
>
> if (pages == HPAGE_PMD_NR)
> count_vm_event(THP_FILE_FALLBACK);
> -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> count_mthp_stat(order, MTHP_STAT_SHMEM_FALLBACK);
> -#endif
> order = next_order(&suitable_orders, order);
> }
> } else {
> @@ -1803,10 +1801,8 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
> count_vm_event(THP_FILE_FALLBACK);
> count_vm_event(THP_FILE_FALLBACK_CHARGE);
> }
> -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK);
> count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK_CHARGE);
> -#endif
> }
> goto unlock;
> }
> @@ -2180,9 +2176,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
> if (!IS_ERR(folio)) {
> if (folio_test_pmd_mappable(folio))
> count_vm_event(THP_FILE_ALLOC);
> -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_ALLOC);
> -#endif
> goto alloced;
> }
> if (PTR_ERR(folio) == -EEXIST)