Message-ID: <BB3BDA79-3185-4346-9260-BA5E1B9C9949@nvidia.com>
Date: Thu, 29 May 2025 11:21:11 -0400
From: Zi Yan <ziy@...dia.com>
To: Baolin Wang <baolin.wang@...ux.alibaba.com>
Cc: akpm@...ux-foundation.org, hughd@...gle.com, david@...hat.com,
 lorenzo.stoakes@...cle.com, Liam.Howlett@...cle.com, npache@...hat.com,
 ryan.roberts@....com, dev.jain@....com, linux-mm@...ck.org,
 linux-fsdevel@...r.kernel.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 2/2] mm: shmem: disallow hugepages if the system-wide
 shmem THP sysfs settings are disabled

On 29 May 2025, at 4:23, Baolin Wang wrote:

> MADV_COLLAPSE ignores the system-wide shmem THP sysfs settings, which means
> that even though we have disabled the shmem THP configuration, MADV_COLLAPSE
> will still attempt to collapse into a shmem THP. This violates the rule we
> have agreed upon: never means never.
>
> So the current strategy is:
> For shmem, if none of the always, madvise, within_size and inherit settings
> have enabled PMD-sized mTHP, then MADV_COLLAPSE will be prohibited from
> collapsing into a PMD-sized mTHP.
>
> For tmpfs, if it is mounted with the 'huge=never' option, then MADV_COLLAPSE
> will be prohibited from collapsing into a PMD-sized mTHP.
>
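If I read the strategy correctly, it boils down to something like the sketch
below (a standalone userspace illustration; names like sysfs_enabled_orders
and mount_huge_never are made up here, this is not the kernel code):

#include <stdbool.h>
#include <stdio.h>

#define PMD_ORDER 9	/* assuming a 2MB PMD with 4KB base pages */
#define BIT(n) (1UL << (n))

/* Orders enabled by the always/madvise/within_size/inherit sysfs settings. */
static unsigned long sysfs_enabled_orders;
/* Whether the tmpfs instance was mounted with huge=never. */
static bool mount_huge_never;

/* Would MADV_COLLAPSE be allowed to build a PMD-sized THP here? */
static bool collapse_pmd_allowed(bool is_tmpfs)
{
	if (is_tmpfs)
		return !mount_huge_never;
	/* anon shmem: at least one sysfs setting must enable PMD order */
	return sysfs_enabled_orders & BIT(PMD_ORDER);
}

int main(void)
{
	sysfs_enabled_orders = 0;	/* everything set to "never" */
	mount_huge_never = true;	/* mounted with huge=never */
	printf("shmem allowed: %d, tmpfs allowed: %d\n",
	       collapse_pmd_allowed(false), collapse_pmd_allowed(true));
	return 0;
}
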
> Signed-off-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
> ---
>  mm/huge_memory.c |  2 +-
>  mm/shmem.c       | 12 ++++++------
>  2 files changed, 7 insertions(+), 7 deletions(-)
>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index d3e66136e41a..a8cfa37cae72 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -166,7 +166,7 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
>  	 * own flags.
>  	 */
>  	if (!in_pf && shmem_file(vma->vm_file))
> -		return shmem_allowable_huge_orders(file_inode(vma->vm_file),
> +		return orders & shmem_allowable_huge_orders(file_inode(vma->vm_file),
>  						   vma, vma->vm_pgoff, 0,
>  						   !enforce_sysfs);

OK, here the caller's orders are masked against the shmem allowable orders.
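
Just to spell out what the mask buys us (a standalone illustration with
made-up values, not the kernel code):

#include <stdio.h>

#define BIT(n) (1UL << (n))
#define PMD_ORDER 9	/* assuming a 2MB PMD with 4KB base pages */

int main(void)
{
	/* What the caller (e.g. MADV_COLLAPSE) asked for. */
	unsigned long requested = BIT(PMD_ORDER);
	/*
	 * What shmem_allowable_huge_orders() grants when every shmem THP
	 * sysfs setting is "never".
	 */
	unsigned long allowed = 0;

	/* Only orders that are both requested and allowed survive. */
	printf("granted orders: %#lx\n", requested & allowed);	/* prints 0 */
	return 0;
}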

>
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 4b42419ce6b2..4dbb28d85cd9 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -613,7 +613,7 @@ static unsigned int shmem_get_orders_within_size(struct inode *inode,
>  }
>
>  static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
> -					      loff_t write_end, bool shmem_huge_force,
> +					      loff_t write_end,
>  					      struct vm_area_struct *vma,
>  					      unsigned long vm_flags)
>  {
> @@ -625,7 +625,7 @@ static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index
>  		return 0;
>  	if (shmem_huge == SHMEM_HUGE_DENY)
>  		return 0;
> -	if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
> +	if (shmem_huge == SHMEM_HUGE_FORCE)
>  		return maybe_pmd_order;

shmem_huge is set by sysfs?

>
>  	/*
> @@ -860,7 +860,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
>  }
>
>  static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
> -					      loff_t write_end, bool shmem_huge_force,
> +					      loff_t write_end,
>  					      struct vm_area_struct *vma,
>  					      unsigned long vm_flags)
>  {
> @@ -1261,7 +1261,7 @@ static int shmem_getattr(struct mnt_idmap *idmap,
>  			STATX_ATTR_NODUMP);
>  	generic_fillattr(idmap, request_mask, inode, stat);
>
> -	if (shmem_huge_global_enabled(inode, 0, 0, false, NULL, 0))
> +	if (shmem_huge_global_enabled(inode, 0, 0, NULL, 0))
>  		stat->blksize = HPAGE_PMD_SIZE;
>
>  	if (request_mask & STATX_BTIME) {
> @@ -1768,7 +1768,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
>  		return 0;
>
>  	global_orders = shmem_huge_global_enabled(inode, index, write_end,
> -						  shmem_huge_force, vma, vm_flags);
> +						  vma, vm_flags);
>  	/* Tmpfs huge pages allocation */
>  	if (!vma || !vma_is_anon_shmem(vma))
>  		return global_orders;
> @@ -1790,7 +1790,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
>  	/* Allow mTHP that will be fully within i_size. */
>  	mask |= shmem_get_orders_within_size(inode, within_size_orders, index, 0);
>
> -	if (vm_flags & VM_HUGEPAGE)
> +	if (shmem_huge_force || (vm_flags & VM_HUGEPAGE))
>  		mask |= READ_ONCE(huge_shmem_orders_madvise);
>
>  	if (global_orders > 0)
> -- 
> 2.43.5

shmem_huge_force comes from !enforce_sysfs in __thp_vma_allowable_orders().
Do you know when sysfs is not enforced and why?

Best Regards,
Yan, Zi
