Message-ID: <00b95366-aa42-4051-9457-04a009aedbb2@linux.alibaba.com>
Date: Mon, 30 Jun 2025 12:47:43 +0800
From: Baolin Wang <baolin.wang@...ux.alibaba.com>
To: Kairui Song <kasong@...cent.com>, linux-mm@...ck.org
Cc: Andrew Morton <akpm@...ux-foundation.org>, Hugh Dickins
 <hughd@...gle.com>, Matthew Wilcox <willy@...radead.org>,
 Kemeng Shi <shikemeng@...weicloud.com>, Chris Li <chrisl@...nel.org>,
 Nhat Pham <nphamcs@...il.com>, Baoquan He <bhe@...hat.com>,
 Barry Song <baohua@...nel.org>, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v3 3/7] mm/shmem, swap: tidy up THP swapin checks



On 2025/6/27 14:20, Kairui Song wrote:
> From: Kairui Song <kasong@...cent.com>
> 
> Move all THP swapin-related checks under CONFIG_TRANSPARENT_HUGEPAGE,
> so they will be trimmed off by the compiler if not needed.
> 
> Also add a WARN if shmem sees an order > 0 entry when
> CONFIG_TRANSPARENT_HUGEPAGE is disabled; that should never happen unless
> things went very wrong.
> 
> There should be no observable feature change except the newly added WARN.
> 
> Signed-off-by: Kairui Song <kasong@...cent.com>

LGTM. Thanks.
Reviewed-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
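
One small aside for readers less familiar with the pattern: IS_ENABLED()
expands to a compile-time constant, so with CONFIG_TRANSPARENT_HUGEPAGE=n
the compiler can discard the whole THP branch as dead code, while the
WARN_ON_ONCE() still catches an unexpected large entry. A minimal,
out-of-tree sketch of the idea (CONFIG_FOO, do_large() and do_small() are
made-up names for illustration, not the actual shmem code):

/*
 * Sketch only, assuming a kernel build context:
 * <linux/kernel.h>, <linux/err.h>, <linux/gfp.h>.
 */
static void *alloc_example(int order, gfp_t gfp)
{
	if (!IS_ENABLED(CONFIG_FOO)) {
		/*
		 * With CONFIG_FOO=n this branch folds to a constant and
		 * the else-if below is dropped entirely; a non-zero order
		 * reaching here would be a bug, hence the WARN.
		 */
		if (WARN_ON_ONCE(order))
			return ERR_PTR(-EINVAL);
	} else if (order) {
		/* Only compiled in when CONFIG_FOO=y. */
		return do_large(order, gfp);
	}

	return do_small(gfp);
}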

> ---
>   mm/shmem.c | 42 ++++++++++++++++++++----------------------
>   1 file changed, 20 insertions(+), 22 deletions(-)
> 
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 033dc7a3435d..f85a985167c5 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -1980,26 +1980,39 @@ static struct folio *shmem_swap_alloc_folio(struct inode *inode,
>   		swp_entry_t entry, int order, gfp_t gfp)
>   {
>   	struct shmem_inode_info *info = SHMEM_I(inode);
> +	int nr_pages = 1 << order;
>   	struct folio *new;
>   	void *shadow;
> -	int nr_pages;
>   
>   	/*
>   	 * We have arrived here because our zones are constrained, so don't
>   	 * limit chance of success with further cpuset and node constraints.
>   	 */
>   	gfp &= ~GFP_CONSTRAINT_MASK;
> -	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && order > 0) {
> -		gfp_t huge_gfp = vma_thp_gfp_mask(vma);
> -
> -		gfp = limit_gfp_mask(huge_gfp, gfp);
> +	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
> +		if (WARN_ON_ONCE(order))
> +			return ERR_PTR(-EINVAL);
> +	} else if (order) {
> +		/*
> +		 * If uffd is active for the vma, we need per-page fault
> +		 * fidelity to maintain the uffd semantics, then fallback
> +		 * to swapin order-0 folio, as well as for zswap case.
> +		 * Any existing sub folio in the swap cache also blocks
> +		 * mTHP swapin.
> +		 */
> +		if ((vma && unlikely(userfaultfd_armed(vma))) ||
> +		     !zswap_never_enabled() ||
> +		     non_swapcache_batch(entry, nr_pages) != nr_pages) {
> +			return ERR_PTR(-EINVAL);
> +		} else {
> +			gfp = limit_gfp_mask(vma_thp_gfp_mask(vma), gfp);
> +		}
>   	}
>   
>   	new = shmem_alloc_folio(gfp, order, info, index);
>   	if (!new)
>   		return ERR_PTR(-ENOMEM);
>   
> -	nr_pages = folio_nr_pages(new);
>   	if (mem_cgroup_swapin_charge_folio(new, vma ? vma->vm_mm : NULL,
>   					   gfp, entry)) {
>   		folio_put(new);
> @@ -2283,9 +2296,6 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
>   	/* Look it up and read it in.. */
>   	folio = swap_cache_get_folio(swap, NULL, 0);
>   	if (!folio) {
> -		int nr_pages = 1 << order;
> -		bool fallback_order0 = false;
> -
>   		/* Or update major stats only when swapin succeeds?? */
>   		if (fault_type) {
>   			*fault_type |= VM_FAULT_MAJOR;
> @@ -2293,20 +2303,8 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
>   			count_memcg_event_mm(fault_mm, PGMAJFAULT);
>   		}
>   
> -		/*
> -		 * If uffd is active for the vma, we need per-page fault
> -		 * fidelity to maintain the uffd semantics, then fallback
> -		 * to swapin order-0 folio, as well as for zswap case.
> -		 * Any existing sub folio in the swap cache also blocks
> -		 * mTHP swapin.
> -		 */
> -		if (order > 0 && ((vma && unlikely(userfaultfd_armed(vma))) ||
> -				  !zswap_never_enabled() ||
> -				  non_swapcache_batch(swap, nr_pages) != nr_pages))
> -			fallback_order0 = true;
> -
>   		/* Skip swapcache for synchronous device. */
> -		if (!fallback_order0 && data_race(si->flags & SWP_SYNCHRONOUS_IO)) {
> +		if (data_race(si->flags & SWP_SYNCHRONOUS_IO)) {
>   			folio = shmem_swap_alloc_folio(inode, vma, index, swap, order, gfp);
>   			if (!IS_ERR(folio)) {
>   				skip_swapcache = true;

