Message-ID: <b2e6fac2-7c23-4a92-8d89-ff29b8566967@linux.alibaba.com>
Date: Mon, 1 Jul 2024 14:47:46 +0800
From: Baolin Wang <baolin.wang@...ux.alibaba.com>
To: Bang Li <libang.li@...group.com>, hughd@...gle.com,
akpm@...ux-foundation.org
Cc: david@...hat.com, ryan.roberts@....com, wangkefeng.wang@...wei.com,
ziy@...dia.com, linux-kernel@...r.kernel.org, linux-mm@...ck.org,
Barry Song <21cnbao@...il.com>
Subject: Re: [PATCH] support "THPeligible" semantics for mTHP with anonymous
shmem
CC Barry.
On 2024/6/28 18:49, Bang Li wrote:
> After commit 7fb1b252afb5 ("mm: shmem: add mTHP support for
> anonymous shmem"), different policies can be configured for
> anonymous shmem through the multi-size THP sysfs interface. But
> currently "THPeligible" only indicates whether the mapping is
> eligible for allocating PMD-mappable THP pages for anonymous
> shmem, so we need to support semantics for mTHP with anonymous
> shmem similar to those for mTHP with anonymous memory.
I did not see a consensus in the previous discussion [1] that "THP*"
related statistics should include mTHP.
In addition, if we all agree that "THPeligible" should include mTHP
statistics, you should also update the corresponding documentation to
keep it consistent.
[1]
https://lore.kernel.org/linux-mm/202406262300.iAURISyJ-lkp@intel.com/T/#md7a77056110cebcc2a9b3cd7e4a8d682667f6ba5
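For reference, here is a minimal user-space sketch (not part of this
patch, just an illustration) that reads the field being discussed: it
creates an anonymous shmem mapping with MAP_ANONYMOUS | MAP_SHARED and
prints that VMA's "THPeligible:" line from /proc/self/smaps, which is
the value whose computation this patch changes for anonymous shmem.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* MAP_ANONYMOUS | MAP_SHARED creates an anonymous shmem VMA. */
	size_t len = 4UL << 20;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_ANONYMOUS | MAP_SHARED, -1, 0);
	unsigned long start, end;
	char line[256];
	int in_vma = 0;
	FILE *f;

	if (p == MAP_FAILED)
		return 1;

	f = fopen("/proc/self/smaps", "r");
	if (!f)
		return 1;

	while (fgets(line, sizeof(line), f)) {
		/* A VMA entry header looks like "start-end perms ...". */
		if (sscanf(line, "%lx-%lx", &start, &end) == 2)
			in_vma = (start == (unsigned long)p);
		else if (in_vma && !strncmp(line, "THPeligible:", 12))
			fputs(line, stdout);
	}

	fclose(f);
	munmap(p, len);
	return 0;
}

One could compare the value this prints before and after the patch,
e.g. with only a non-PMD mTHP size enabled for shmem via the per-size
shmem_enabled sysfs knobs, to see the proposed semantic change.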
> Signed-off-by: Bang Li <libang.li@...group.com>
> ---
> fs/proc/task_mmu.c | 10 +++++++---
> include/linux/huge_mm.h | 11 +++++++++++
> mm/shmem.c | 9 +--------
> 3 files changed, 19 insertions(+), 11 deletions(-)
>
> diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
> index 93fb2c61b154..09b5db356886 100644
> --- a/fs/proc/task_mmu.c
> +++ b/fs/proc/task_mmu.c
> @@ -870,6 +870,7 @@ static int show_smap(struct seq_file *m, void *v)
> {
> struct vm_area_struct *vma = v;
> struct mem_size_stats mss = {};
> + bool thp_eligible;
>
> smap_gather_stats(vma, &mss, 0);
>
> @@ -882,9 +883,12 @@ static int show_smap(struct seq_file *m, void *v)
>
> __show_smap(m, &mss, false);
>
> - seq_printf(m, "THPeligible: %8u\n",
> - !!thp_vma_allowable_orders(vma, vma->vm_flags,
> - TVA_SMAPS | TVA_ENFORCE_SYSFS, THP_ORDERS_ALL));
> + thp_eligible = !!thp_vma_allowable_orders(vma, vma->vm_flags,
> + TVA_SMAPS | TVA_ENFORCE_SYSFS, THP_ORDERS_ALL);
> + if (vma_is_anon_shmem(vma))
> + thp_eligible = !!shmem_allowable_huge_orders(file_inode(vma->vm_file),
> + vma, vma->vm_pgoff, thp_eligible);
> + seq_printf(m, "THPeligible: %8u\n", thp_eligible);
>
> if (arch_pkeys_enabled())
> seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
> index 212cca384d7e..f87136f38aa1 100644
> --- a/include/linux/huge_mm.h
> +++ b/include/linux/huge_mm.h
> @@ -267,6 +267,10 @@ unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
> return __thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders);
> }
>
> +unsigned long shmem_allowable_huge_orders(struct inode *inode,
> + struct vm_area_struct *vma, pgoff_t index,
> + bool global_huge);
> +
> struct thpsize {
> struct kobject kobj;
> struct list_head node;
> @@ -460,6 +464,13 @@ static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
> return 0;
> }
>
> +static inline unsigned long shmem_allowable_huge_orders(struct inode *inode,
> + struct vm_area_struct *vma, pgoff_t index,
> + bool global_huge)
> +{
> + return 0;
> +}
> +
> #define transparent_hugepage_flags 0UL
>
> #define thp_get_unmapped_area NULL
> diff --git a/mm/shmem.c b/mm/shmem.c
> index d495c0701a83..aa85df9c662a 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -1622,7 +1622,7 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
> }
>
> #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> -static unsigned long shmem_allowable_huge_orders(struct inode *inode,
> +unsigned long shmem_allowable_huge_orders(struct inode *inode,
> struct vm_area_struct *vma, pgoff_t index,
> bool global_huge)
> {
> @@ -1707,13 +1707,6 @@ static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault
> return orders;
> }
> #else
> -static unsigned long shmem_allowable_huge_orders(struct inode *inode,
> - struct vm_area_struct *vma, pgoff_t index,
> - bool global_huge)
> -{
> - return 0;
> -}
> -
> static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
> struct address_space *mapping, pgoff_t index,
> unsigned long orders)