Message-ID: <2360dd36-bbde-4e7c-8054-24a9e4a6962b@lucifer.local>
Date: Wed, 6 Aug 2025 11:45:13 +0100
From: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
To: David Hildenbrand <david@...hat.com>
Cc: Dev Jain <dev.jain@....com>, akpm@...ux-foundation.org,
        ryan.roberts@....com, willy@...radead.org, linux-mm@...ck.org,
        linux-kernel@...r.kernel.org, catalin.marinas@....com, will@...nel.org,
        Liam.Howlett@...cle.com, vbabka@...e.cz, jannh@...gle.com,
        anshuman.khandual@....com, peterx@...hat.com, joey.gouly@....com,
        ioworker0@...il.com, baohua@...nel.org, kevin.brodsky@....com,
        quic_zhenhuah@...cinc.com, christophe.leroy@...roup.eu,
        yangyicong@...ilicon.com, linux-arm-kernel@...ts.infradead.org,
        hughd@...gle.com, yang@...amperecomputing.com, ziy@...dia.com
Subject: Re: [PATCH v5 6/7] mm: Optimize mprotect() by PTE batching

On Wed, Aug 06, 2025 at 12:11:33PM +0200, David Hildenbrand wrote:
> On 06.08.25 11:50, Lorenzo Stoakes wrote:
> > On Wed, Aug 06, 2025 at 03:07:49PM +0530, Dev Jain wrote:
> > > > >
> > > > > You mean in _this_ PTE of the batch, right? As we're invoking these
> > > > > on each part of the PTE table.
> > > > >
> > > > > I mean I guess we can simply do:
> > > > >
> > > > >      struct page *first_page = pte_page(ptent);
> > > > >
> > > > > Right?
> > > >
> > > > Yes, but we should forward the result from vm_normal_page(), which does
> > > > exactly that for you, and increment the page accordingly as required,
> > > > just like with the pte we are processing.
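> > > >
> > > > i.e., something like this (untested, just to illustrate the idea):
> > > >
> > > > 	struct page *page = vm_normal_page(vma, addr, oldpte);
> > > > 	struct folio *folio = page ? page_folio(page) : NULL;
> > > >
> > > > and then advance "page" alongside the pte as we walk the batch,
> > > > rather than re-deriving it via pte_page().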
> > >
> > > Makes sense, so I guess I will have to change the signature of
> > > prot_numa_skip() to pass a double pointer to a page instead of a folio,
> > > derive the folio in the caller, and pass down both the folio and the
> > > page to set_write_prot_commit_flush_ptes().
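> > >
> > > i.e., roughly (just a sketch, with a made-up parameter name):
> > >
> > > static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
> > > 			   pte_t oldpte, pte_t *pte, int target_node,
> > > 			   struct page **pagep);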
> >
> > I already don't love how we pass the folio back from there for very dubious
> > benefit. I really hate the idea of having a struct **page parameter...
> >
> > I wonder if we should just have a quick fixup as a hotfix, and refine this
> > more later?
>
> This is not an issue in any released kernel, so we can do this properly.
>
> We should just remove that nested vm_normal_folio().
>
> Untested, but it should give an idea of what we can do.
>
>
> diff --git a/mm/mprotect.c b/mm/mprotect.c
> index 78bded7acf795..4e0a22f7db495 100644
> --- a/mm/mprotect.c
> +++ b/mm/mprotect.c
> @@ -120,7 +120,7 @@ static int mprotect_folio_pte_batch(struct folio *folio, pte_t *ptep,
>  static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
>  			   pte_t oldpte, pte_t *pte, int target_node,
> -			   struct folio **foliop)
> +			   struct folio *folio)
>  {
> -	struct folio *folio = NULL;
>  	bool ret = true;
> @@ -131,7 +131,6 @@ static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
>  	if (pte_protnone(oldpte))
>  		goto skip;
> -	folio = vm_normal_folio(vma, addr, oldpte);
>  	if (!folio)
>  		goto skip;
> @@ -172,8 +171,6 @@ static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
>  	if (folio_use_access_time(folio))
>  		folio_xchg_access_time(folio, jiffies_to_msecs(jiffies));
>  skip:
> -	*foliop = folio;
>  	return ret;
>  }
> @@ -231,10 +228,9 @@ static int page_anon_exclusive_sub_batch(int start_idx, int max_len,
>   * retrieve sub-batches.
>   */
>  static void commit_anon_folio_batch(struct vm_area_struct *vma,
> -		struct folio *folio, unsigned long addr, pte_t *ptep,
> +		struct folio *folio, struct page *first_page, unsigned long addr, pte_t *ptep,
>  		pte_t oldpte, pte_t ptent, int nr_ptes, struct mmu_gather *tlb)
>  {
> -	struct page *first_page = folio_page(folio, 0);
>  	bool expected_anon_exclusive;
>  	int sub_batch_idx = 0;
>  	int len;
> @@ -251,7 +247,7 @@ static void commit_anon_folio_batch(struct vm_area_struct *vma,
>  }
>  static void set_write_prot_commit_flush_ptes(struct vm_area_struct *vma,
> -		struct folio *folio, unsigned long addr, pte_t *ptep,
> +		struct folio *folio, struct page *page, unsigned long addr, pte_t *ptep,
>  		pte_t oldpte, pte_t ptent, int nr_ptes, struct mmu_gather *tlb)
>  {
>  	bool set_write;
> @@ -270,7 +266,7 @@ static void set_write_prot_commit_flush_ptes(struct vm_area_struct *vma,
>  				       /* idx = */ 0, set_write, tlb);
>  		return;
>  	}
> -	commit_anon_folio_batch(vma, folio, addr, ptep, oldpte, ptent, nr_ptes, tlb);
> +	commit_anon_folio_batch(vma, folio, page, addr, ptep, oldpte, ptent, nr_ptes, tlb);
>  }
>  static long change_pte_range(struct mmu_gather *tlb,
> @@ -305,15 +301,20 @@ static long change_pte_range(struct mmu_gather *tlb,
>  			const fpb_t flags = FPB_RESPECT_SOFT_DIRTY | FPB_RESPECT_WRITE;
>  			int max_nr_ptes = (end - addr) >> PAGE_SHIFT;
>  			struct folio *folio = NULL;
> +			struct page *page;
>  			pte_t ptent;
> +			page = vm_normal_folio(vma, addr, oldpte);

Surely vm_normal_page()? :P

> +			if (page)
> +				folio = page_folio(page);
> +
>  			/*
>  			 * Avoid trapping faults against the zero or KSM
>  			 * pages. See similar comment in change_huge_pmd.
>  			 */
>  			if (prot_numa) {
>  				int ret = prot_numa_skip(vma, addr, oldpte, pte,
> -							 target_node, &folio);
> +							 target_node, folio);
>  				if (ret) {
>  					/* determine batch to skip */
> @@ -323,9 +324,6 @@ static long change_pte_range(struct mmu_gather *tlb,
>  				}
>  			}
> -			if (!folio)
> -				folio = vm_normal_folio(vma, addr, oldpte);
> -

Yes :) thanks :>)

>  			nr_ptes = mprotect_folio_pte_batch(folio, pte, oldpte, max_nr_ptes, flags);
>  			oldpte = modify_prot_start_ptes(vma, addr, pte, nr_ptes);
> @@ -351,7 +349,7 @@ static long change_pte_range(struct mmu_gather *tlb,
>  			 */
>  			if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) &&
>  			     !pte_write(ptent))
> -				set_write_prot_commit_flush_ptes(vma, folio,
> +				set_write_prot_commit_flush_ptes(vma, folio, page,
>  				addr, pte, oldpte, ptent, nr_ptes, tlb);
>  			else
>  				prot_commit_flush_ptes(vma, addr, pte, oldpte, ptent,
> --
> 2.50.1
>
>
> --
> Cheers,
>
> David / dhildenb
>
