Message-ID: <f206ad30-a007-499e-941c-1c4abc0c5eb3@linux.alibaba.com>
Date: Mon, 22 Jul 2024 10:41:14 +0800
From: Baolin Wang <baolin.wang@...ux.alibaba.com>
To: Ryan Roberts <ryan.roberts@....com>, akpm@...ux-foundation.org,
hughd@...gle.com
Cc: willy@...radead.org, david@...hat.com, 21cnbao@...il.com, ziy@...dia.com,
ioworker0@...il.com, linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2 3/3] mm: shmem: move shmem_huge_global_enabled() into
shmem_allowable_huge_orders()
(Sorry for the late reply due to my vacation.)
On 2024/7/15 21:36, Ryan Roberts wrote:
> On 13/07/2024 14:24, Baolin Wang wrote:
>> Move shmem_huge_global_enabled() into the shmem_allowable_huge_orders() function,
>> so that shmem_allowable_huge_orders() can also help to find the allowable huge
>> orders for tmpfs. Moreover, shmem_huge_global_enabled() can become static.
>>
>> No functional changes.
>>
>> Signed-off-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
>
> one nit below, but either way:
>
> Reviewed-by: Ryan Roberts <ryan.roberts@....com>
>
>> ---
>> include/linux/shmem_fs.h | 12 ++----------
>> mm/huge_memory.c | 12 +++---------
>> mm/shmem.c | 41 ++++++++++++++++++++++++++--------------
>> 3 files changed, 32 insertions(+), 33 deletions(-)
>>
>> diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
>> index 405ee8d3589a..1564d7d3ca61 100644
>> --- a/include/linux/shmem_fs.h
>> +++ b/include/linux/shmem_fs.h
>> @@ -111,21 +111,13 @@ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
>> int shmem_unuse(unsigned int type);
>>
>> #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>> -extern bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index, bool shmem_huge_force,
>> - struct mm_struct *mm, unsigned long vm_flags);
>> unsigned long shmem_allowable_huge_orders(struct inode *inode,
>> struct vm_area_struct *vma, pgoff_t index,
>> - bool global_huge);
>> + bool shmem_huge_force);
>> #else
>> -static __always_inline bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
>> - bool shmem_huge_force, struct mm_struct *mm,
>> - unsigned long vm_flags)
>> -{
>> - return false;
>> -}
>> static inline unsigned long shmem_allowable_huge_orders(struct inode *inode,
>> struct vm_area_struct *vma, pgoff_t index,
>> - bool global_huge)
>> + bool shmem_huge_force)
>> {
>> return 0;
>> }
>> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
>> index cc9bad12be75..f69980b5b5fc 100644
>> --- a/mm/huge_memory.c
>> +++ b/mm/huge_memory.c
>> @@ -151,16 +151,10 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
>> * Must be done before hugepage flags check since shmem has its
>> * own flags.
>> */
>> - if (!in_pf && shmem_file(vma->vm_file)) {
>> - bool global_huge = shmem_huge_global_enabled(file_inode(vma->vm_file),
>> - vma->vm_pgoff, !enforce_sysfs,
>> - vma->vm_mm, vm_flags);
>> -
>> - if (!vma_is_anon_shmem(vma))
>> - return global_huge ? orders : 0;
>> + if (!in_pf && shmem_file(vma->vm_file))
>> return shmem_allowable_huge_orders(file_inode(vma->vm_file),
>> - vma, vma->vm_pgoff, global_huge);
>> - }
>> + vma, vma->vm_pgoff,
>> + !enforce_sysfs);
>>
>> if (!vma_is_anonymous(vma)) {
>> /*
>> diff --git a/mm/shmem.c b/mm/shmem.c
>> index 1445dcd39b6f..4d274f5a17d9 100644
>> --- a/mm/shmem.c
>> +++ b/mm/shmem.c
>> @@ -581,7 +581,7 @@ static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
>> }
>> }
>>
>> -bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
>> +static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
>> bool shmem_huge_force, struct mm_struct *mm,
>> unsigned long vm_flags)
>> {
>> @@ -772,6 +772,13 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
>> {
>> return 0;
>> }
>> +
>> +static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
>> + bool shmem_huge_force, struct mm_struct *mm,
>> + unsigned long vm_flags)
>> +{
>> + return false;
>> +}
>> #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
>>
>> /*
>> @@ -1625,27 +1632,39 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
>> #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>> unsigned long shmem_allowable_huge_orders(struct inode *inode,
>> struct vm_area_struct *vma, pgoff_t index,
>> - bool global_huge)
>> + bool shmem_huge_force)
>> {
>> unsigned long mask = READ_ONCE(huge_shmem_orders_always);
>> unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
>> - unsigned long vm_flags = vma->vm_flags;
>> + unsigned long vm_flags = vma ? vma->vm_flags : 0;
>> + struct mm_struct *fault_mm = vma ? vma->vm_mm : NULL;
>
> nit: rather than deriving the fault_mm here, I wonder if it's cleaner to just
> pass vma to shmem_huge_global_enabled()? shmem_huge_global_enabled() is just
> using it as a guard to access vm_flags, which you can just as easily do by
> testing the vma for non-NULL. And you can access mm flags with vma->vm_mm->flags
> after testing the vma too.
Makes sense to me, and I will do that in the next version; a rough sketch of what I have in mind is below.
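
Something roughly like the following (just an untested sketch against this patch, so the exact names and placement of the checks may still change in v3): the mm/vm_flags parameters become a single vma parameter, and the vma NULL check replaces the mm NULL check as the guard:

static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
				      bool shmem_huge_force,
				      struct vm_area_struct *vma)
{
	/*
	 * Use the vma itself as the guard: when it is NULL (tmpfs path)
	 * there are no per-vma or per-mm flags to consult.
	 */
	if (vma && ((vma->vm_flags & VM_NOHUGEPAGE) ||
		    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
		return false;
	...
}

and in shmem_allowable_huge_orders(), drop the fault_mm local and call:

	global_huge = shmem_huge_global_enabled(inode, index,
						shmem_huge_force, vma);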
Thanks for reviewing.