lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <aFP56mLztZl8wI-K@localhost.localdomain>
Date: Thu, 19 Jun 2025 13:52:10 +0200
From: Oscar Salvador <osalvador@...e.de>
To: David Hildenbrand <david@...hat.com>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
	Muchun Song <muchun.song@...ux.dev>,
	James Houghton <jthoughton@...gle.com>,
	Peter Xu <peterx@...hat.com>, Gavin Guo <gavinguo@...lia.com>,
	linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 2/5] mm,hugetlb: Document the reason to lock the folio in
 the faulting path

On Tue, Jun 17, 2025 at 03:42:09PM +0200, David Hildenbrand wrote:
> Yes. As an alternative, keep locking it in the caller and only unlock in the
> !anon case?

This is what I came up with:

What do you think?

I just made sure that all hugetlb-LTP tests pass fine (after I fixed an
obvious mistake :-S)

 From 3a0c53a00511abdcf5df53491bbb9295973f24f9 Mon Sep 17 00:00:00 2001
 From: Oscar Salvador <osalvador@...e.de>
 Date: Wed, 11 Jun 2025 10:05:34 +0200
 Subject: [PATCH] mm,hugetlb: Sort out folio locking in the faulting path
 
 Recent conversations showed that there was a misunderstanding about why we
 were locking the folio prior to calling hugetlb_wp().
 In fact, as soon as we have the folio mapped into the pagetables, we no longer
 need to hold it locked, because we know that no concurrent truncation could have
 happened.
 There is only one case where the folio needs to be locked, and that is when we
 are handling an anonymous folio, because hugetlb_wp() will check whether it can
 re-use it exclusively for the process that is faulting it in.
 
 So, pass the folio locked to hugetlb_wp() when that is the case.
 
 Suggested-by: David Hildenbrand <david@...hat.com>
 Signed-off-by: Oscar Salvador <osalvador@...e.de>
 ---
  mm/hugetlb.c | 43 +++++++++++++++++++++++++++++++++----------
  1 file changed, 33 insertions(+), 10 deletions(-)
 
 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
 index 175edafeec67..04049d0fb70d 100644
 --- a/mm/hugetlb.c
 +++ b/mm/hugetlb.c
 @@ -6437,6 +6437,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
  	pte_t new_pte;
  	bool new_folio, new_pagecache_folio = false;
  	u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
 +	bool folio_locked = true;
  
  	/*
  	 * Currently, we are forced to kill the process in the event the
 @@ -6602,6 +6603,11 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
  
  	hugetlb_count_add(pages_per_huge_page(h), mm);
  	if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
 +		/* No need to lock file folios. See comment in hugetlb_fault() */
 +		if (!anon_rmap) {
 +			folio_locked = false;
 +			folio_unlock(folio);
 +		}
  		/* Optimization, do the COW without a second fault */
  		ret = hugetlb_wp(vmf);
  	}
 @@ -6616,7 +6622,8 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
  	if (new_folio)
  		folio_set_hugetlb_migratable(folio);
  
 -	folio_unlock(folio);
 +	if (folio_locked)
 +		folio_unlock(folio);
  out:
  	hugetlb_vma_unlock_read(vma);
  
 @@ -6636,7 +6643,8 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
  	if (new_folio && !new_pagecache_folio)
  		restore_reserve_on_error(h, vma, vmf->address, folio);
  
 -	folio_unlock(folio);
 +	if (folio_locked)
 +		folio_unlock(folio);
  	folio_put(folio);
  	goto out;
  }
 @@ -6670,7 +6678,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  {
  	vm_fault_t ret;
  	u32 hash;
 -	struct folio *folio;
 +	struct folio *folio = NULL;
  	struct hstate *h = hstate_vma(vma);
  	struct address_space *mapping;
  	struct vm_fault vmf = {
 @@ -6687,6 +6695,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  		 * be hard to debug if called functions make assumptions
  		 */
  	};
 +	bool folio_locked = false;
  
  	/*
  	 * Serialize hugepage allocation and instantiation, so that we don't
 @@ -6801,13 +6810,24 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  		/* Fallthrough to CoW */
  	}
  
 -	/* hugetlb_wp() requires page locks of pte_page(vmf.orig_pte) */
 -	folio = page_folio(pte_page(vmf.orig_pte));
 -	folio_lock(folio);
 -	folio_get(folio);
 -
  	if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
  		if (!huge_pte_write(vmf.orig_pte)) {
 +			/*
 +			 * Anonymous folios need to be locked since hugetlb_wp()
 +			 * checks whether we can re-use it exclusively for us in
 +			 * case we are the only user.
 +			 */
 +			folio = page_folio(pte_page(vmf.orig_pte));
 +			folio_get(folio);
 +			if (folio_test_anon(folio)) {
 +				spin_unlock(vmf.ptl);
 +				folio_lock(folio);
 +				folio_locked = true;
 +				spin_lock(vmf.ptl);
 +				if (unlikely(!pte_same(vmf.orig_pte, huge_ptep_get(mm,
 +						   vmf.address, vmf.pte))))
 +					goto out_put_page;
 +			}
  			ret = hugetlb_wp(&vmf);
  			goto out_put_page;
  		} else if (likely(flags & FAULT_FLAG_WRITE)) {
 @@ -6819,8 +6839,11 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  						flags & FAULT_FLAG_WRITE))
  		update_mmu_cache(vma, vmf.address, vmf.pte);
  out_put_page:
 -	folio_unlock(folio);
 -	folio_put(folio);
 +	if (folio) {
 +		if (folio_locked)
 +			folio_unlock(folio);
 +		folio_put(folio);
 +	}
  out_ptl:
  	spin_unlock(vmf.ptl);
  out_mutex:
 -- 
 2.49.0

 

-- 
Oscar Salvador
SUSE Labs

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ