Message-ID: <17189a00-25c4-4b96-a468-84998ef17a77@oracle.com>
Date: Mon, 21 Apr 2025 11:08:11 -0400
From: Sidhartha Kumar <sidhartha.kumar@...cle.com>
To: nifan.cxl@...il.com, muchun.song@...ux.dev, willy@...radead.org
Cc: mcgrof@...nel.org, a.manzanares@...sung.com, dave@...olabs.net,
        akpm@...ux-foundation.org, david@...hat.com, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org, Fan Ni <fan.ni@...sung.com>
Subject: Re: [PATCH v2 4/4] mm/hugetlb: Convert use of struct page to folio in
 __unmap_hugepage_range()

On 4/18/25 12:57 PM, nifan.cxl@...il.com wrote:
> From: Fan Ni <fan.ni@...sung.com>
> 
> In __unmap_hugepage_range(), the "page" pointer always points to the
> first page of a huge page, which guarantees there is a folio associated
> with it.  Convert the "page" pointer to a folio pointer.
> 
> Signed-off-by: Fan Ni <fan.ni@...sung.com>
> ---
> This is a new patch added to the series based on the discussion here:
> https://lore.kernel.org/linux-mm/aAHUluy7T32ZlYg7@debian/T/#m2b9cc1743e1907e52658815b297b9d249474f387
> ---
>   mm/hugetlb.c | 18 +++++++++---------
>   1 file changed, 9 insertions(+), 9 deletions(-)
> 
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 7d280ab23784..8177a3fe47d7 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -5840,7 +5840,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
>   	pte_t *ptep;
>   	pte_t pte;
>   	spinlock_t *ptl;
> -	struct page *page;
> +	struct folio *folio;
>   	struct hstate *h = hstate_vma(vma);
>   	unsigned long sz = huge_page_size(h);
>   	bool adjust_reservation = false;
> @@ -5904,14 +5904,14 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
>   			continue;
>   		}
>   
> -		page = pte_page(pte);
> +		folio = page_folio(pte_page(pte));
>   		/*
>   		 * If a reference page is supplied, it is because a specific
>   		 * page is being unmapped, not a range. Ensure the page we
>   		 * are about to unmap is the actual page of interest.
>   		 */
>   		if (ref_folio) {
> -			if (page != folio_page(ref_folio, 0)) {
> +			if (folio != ref_folio) {
>   				spin_unlock(ptl);
>   				continue;
>   			}
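
A side note for readers following along: pte_page() on a hugetlb PTE
returns the head page of the mapping, so page_folio() on that page is
its containing folio, and the direct "folio != ref_folio" test is
equivalent to the old "page != folio_page(ref_folio, 0)" comparison.
Below is a minimal user-space model of that invariant (struct page,
struct folio, page_folio() and folio_page() here are toy stand-ins for
illustration, not the kernel definitions):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define PAGES_PER_FOLIO 8

/* Toy model: each page records its compound head page. */
struct page {
	struct page *head;
};

/* Toy model: a folio is its head page, here holding the whole array. */
struct folio {
	struct page pages[PAGES_PER_FOLIO];
};

static struct folio *page_folio(struct page *page)
{
	return (struct folio *)page->head;
}

static struct page *folio_page(struct folio *folio, size_t n)
{
	return &folio->pages[n];
}

static void folio_init(struct folio *folio)
{
	for (size_t i = 0; i < PAGES_PER_FOLIO; i++)
		folio->pages[i].head = &folio->pages[0];
}

int main(void)
{
	struct folio a, b;

	folio_init(&a);
	folio_init(&b);

	/* A hugetlb PTE always maps the head page of its folio. */
	struct page *page = folio_page(&a, 0);

	/* Old check and new check agree, both for a match and a miss. */
	assert((page != folio_page(&a, 0)) == (page_folio(page) != &a));
	assert((page != folio_page(&b, 0)) == (page_folio(page) != &b));

	puts("old and new ref-page checks agree");
	return 0;
}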
> @@ -5926,7 +5926,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
>   		pte = huge_ptep_get_and_clear(mm, address, ptep, sz);
>   		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
>   		if (huge_pte_dirty(pte))
> -			set_page_dirty(page);
> +			folio_mark_dirty(folio);
>   		/* Leave a uffd-wp pte marker if needed */
>   		if (huge_pte_uffd_wp(pte) &&
>   		    !(zap_flags & ZAP_FLAG_DROP_MARKER))
> @@ -5934,7 +5934,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
>   					make_pte_marker(PTE_MARKER_UFFD_WP),
>   					sz);
>   		hugetlb_count_sub(pages_per_huge_page(h), mm);
> -		hugetlb_remove_rmap(page_folio(page));
> +		hugetlb_remove_rmap(folio);
>   
>   		/*
>   		 * Restore the reservation for anonymous page, otherwise the
> @@ -5943,8 +5943,8 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
>   		 * reservation bit.
>   		 */
>   		if (!h->surplus_huge_pages && __vma_private_lock(vma) &&
> -		    folio_test_anon(page_folio(page))) {
> -			folio_set_hugetlb_restore_reserve(page_folio(page));
> +		    folio_test_anon(folio)) {
> +			folio_set_hugetlb_restore_reserve(folio);
>   			/* Reservation to be adjusted after the spin lock */
>   			adjust_reservation = true;
>   		}
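
The restore-reserve handling above is the subtle part of this function,
so a sketch may help. Loosely: a private mapping that consumed a
reservation at fault time must give it back when the page is unmapped
and freed, or the reservation is silently lost. The toy program below
(toy_alloc_reserved(), toy_free() and the counters are made-up names,
not kernel code) models just that accounting:

#include <stdbool.h>
#include <stdio.h>

/* Toy pool counters, loosely modelling fields of struct hstate. */
static long free_huge_pages = 1;
static long resv_huge_pages = 1;

struct toy_folio {
	bool restore_reserve;
};

/* A fault against a private reservation consumes one reserved page. */
static void toy_alloc_reserved(struct toy_folio *f)
{
	free_huge_pages--;
	resv_huge_pages--;
	f->restore_reserve = false;
}

/* Loosely models free_huge_folio(): a folio freed with the
 * restore-reserve flag set returns the reservation to the pool. */
static void toy_free(struct toy_folio *f)
{
	free_huge_pages++;
	if (f->restore_reserve)
		resv_huge_pages++;
}

int main(void)
{
	struct toy_folio f;

	toy_alloc_reserved(&f);

	/* What folio_set_hugetlb_restore_reserve(folio) arranges above. */
	f.restore_reserve = true;

	toy_free(&f);

	/* Without the flag, resv would stay at 0 and the reservation
	 * would be lost; with it, both counters are back to 1. */
	printf("free=%ld resv=%ld\n", free_huge_pages, resv_huge_pages);
	return 0;
}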
> @@ -5968,12 +5968,12 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
>   				 * count will not be incremented by free_huge_folio.
>   				 * Act as if we consumed the reservation.
>   				 */
> -				folio_clear_hugetlb_restore_reserve(page_folio(page));
> +				folio_clear_hugetlb_restore_reserve(folio);
>   			else if (rc)
>   				vma_add_reservation(h, vma, address);
>   		}
>   
> -		tlb_remove_page_size(tlb, page, huge_page_size(h));
> +		tlb_remove_page_size(tlb, folio_page(folio, 0), huge_page_size(h));
>   		/*
>   		 * Bail out after unmapping reference page if supplied
>   		 */
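
One more observation on the final hunk: tlb_remove_page_size() is still
a page-based API, so this remains the one place where the folio must be
converted back to its head page via folio_page(folio, 0). If the tlb
layer ever grows a folio entry point, the conversion could be hidden
behind a wrapper along these lines (a hypothetical sketch, not part of
this patch, assuming the current tlb_remove_page_size() signature):

static inline void tlb_remove_folio_size(struct mmu_gather *tlb,
					 struct folio *folio,
					 int page_size)
{
	/* Hand the head page to the existing page-based batching API. */
	tlb_remove_page_size(tlb, folio_page(folio, 0), page_size);
}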
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@...cle.com>
