Message-ID: <b6ccc61b-2948-4529-9c9d-47e9c9ed25ab@lucifer.local>
Date: Fri, 27 Jun 2025 20:04:48 +0100
From: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
To: David Hildenbrand <david@...hat.com>
Cc: linux-kernel@...r.kernel.org, linux-mm@...ck.org,
        Andrew Morton <akpm@...ux-foundation.org>,
        "Liam R. Howlett" <Liam.Howlett@...cle.com>,
        Vlastimil Babka <vbabka@...e.cz>, Jann Horn <jannh@...gle.com>,
        Mike Rapoport <rppt@...nel.org>,
        Suren Baghdasaryan <surenb@...gle.com>, Michal Hocko <mhocko@...e.com>,
        Zi Yan <ziy@...dia.com>, Matthew Brost <matthew.brost@...el.com>,
        Joshua Hahn <joshua.hahnjy@...il.com>, Rakie Kim <rakie.kim@...com>,
        Byungchul Park <byungchul@...com>, Gregory Price <gourry@...rry.net>,
        Ying Huang <ying.huang@...ux.alibaba.com>,
        Alistair Popple <apopple@...dia.com>, Pedro Falcato <pfalcato@...e.de>,
        Rik van Riel <riel@...riel.com>, Harry Yoo <harry.yoo@...cle.com>
Subject: Re: [PATCH v1 4/4] mm: remove boolean output parameters from
 folio_pte_batch_ext()

On Fri, Jun 27, 2025 at 01:55:10PM +0200, David Hildenbrand wrote:
> Instead, let's just allow for specifying through flags whether we want
> to have bits merged into the original PTE.
>
> For the madvise() case, simplify by having only a single parameter for
> merging young+dirty. For madvise_cold_or_pageout_pte_range() merging the
> dirty bit is not required, but also not harmful. This code is not that
> performance critical after all to really force all micro-optimizations.
>
> As we now have two pte_t * parameters, use PageTable() to make sure we
> are actually given a pointer at a copy of the PTE, not a pointer into
> an actual page table.
>
> Signed-off-by: David Hildenbrand <david@...hat.com>

Overall a really nice cleanup! Just some comments below.

> ---
>  mm/internal.h | 58 +++++++++++++++++++++++++++++++--------------------
>  mm/madvise.c  | 26 +++++------------------
>  mm/memory.c   |  8 ++-----
>  mm/util.c     |  2 +-
>  4 files changed, 43 insertions(+), 51 deletions(-)
>
> diff --git a/mm/internal.h b/mm/internal.h
> index 6000b683f68ee..fe69e21b34a24 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -208,6 +208,18 @@ typedef int __bitwise fpb_t;
>  /* Compare PTEs honoring the soft-dirty bit. */
>  #define FPB_HONOR_SOFT_DIRTY		((__force fpb_t)BIT(1))
>
> +/*
> + * Merge PTE write bits: if any PTE in the batch is writable, modify the
> + * PTE at @ptentp to be writable.
> + */
> +#define FPB_MERGE_WRITE			((__force fpb_t)BIT(2))
> +
> +/*
> + * Merge PTE young and dirty bits: if any PTE in the batch is young or dirty,
> + * modify the PTE at @ptentp to be young or dirty, respectively.
> + */
> +#define FPB_MERGE_YOUNG_DIRTY		((__force fpb_t)BIT(3))
> +
>  static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
>  {
>  	if (!(flags & FPB_HONOR_DIRTY))
> @@ -220,16 +232,11 @@ static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
>  /**
>   * folio_pte_batch_ext - detect a PTE batch for a large folio
>   * @folio: The large folio to detect a PTE batch for.
> + * @vma: The VMA. Only relevant with FPB_MERGE_WRITE, otherwise can be NULL.
>   * @ptep: Page table pointer for the first entry.
> - * @pte: Page table entry for the first page.
> + * @ptentp: Pointer at a copy of the first page table entry.

This seems weird to me: I know it's a pointer to a copy of the PTE, essentially
replacing the pte param from before, but now it's also an output value?
Shouldn't that be made clear?

I know it's a pain and causes churn, but if this is now meant to be an output
var we should probably make it the last param too.
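
Perhaps something like this (just a sketch of the reordering, nothing more):

static inline unsigned int folio_pte_batch_ext(struct folio *folio,
		struct vm_area_struct *vma, pte_t *ptep, unsigned int max_nr,
		fpb_t flags, pte_t *ptentp)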

At the very least it needs an "(output)" annotation or something here.
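
Maybe something along these lines (wording is just a suggestion):

 * @ptentp: Pointer to a copy of the first page table entry. Updated with the
 *	    merged bits depending on the FPB_MERGE_* flags (output).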

>   * @max_nr: The maximum number of table entries to consider.
>   * @flags: Flags to modify the PTE batch semantics.
> - * @any_writable: Optional pointer to indicate whether any entry except the
> - *		  first one is writable.
> - * @any_young: Optional pointer to indicate whether any entry except the
> - *		  first one is young.
> - * @any_dirty: Optional pointer to indicate whether any entry except the
> - *		  first one is dirty.
>   *
>   * Detect a PTE batch: consecutive (present) PTEs that map consecutive
>   * pages of the same large folio in a single VMA and a single page table.
> @@ -242,28 +249,26 @@ static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags)
>   * must be limited by the caller so scanning cannot exceed a single VMA and
>   * a single page table.
>   *
> + * Depending on the FPB_MERGE_* flags, the pte stored at @ptentp will
> + * be modified.

This explains that you modify it, but it doesn't really stand out as an output
parameter.
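
Maybe reword so the in/out nature is explicit, e.g. (rough wording only):

 * Depending on the FPB_MERGE_* flags, the pte stored at @ptentp will be
 * updated with the merged bits, so it also acts as an output parameter.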

> + *
>   * This function will be inlined to optimize based on the input parameters;
>   * consider using folio_pte_batch() instead if applicable.
>   *
>   * Return: the number of table entries in the batch.
>   */
>  static inline unsigned int folio_pte_batch_ext(struct folio *folio,
> -		pte_t *ptep, pte_t pte, unsigned int max_nr, fpb_t flags,
> -		bool *any_writable, bool *any_young, bool *any_dirty)
> +		struct vm_area_struct *vma, pte_t *ptep, pte_t *ptentp,
> +		unsigned int max_nr, fpb_t flags)
>  {
> +	bool any_writable = false, any_young = false, any_dirty = false;
> +	pte_t expected_pte, pte = *ptentp;
>  	unsigned int nr, cur_nr;
> -	pte_t expected_pte;
> -
> -	if (any_writable)
> -		*any_writable = false;
> -	if (any_young)
> -		*any_young = false;
> -	if (any_dirty)
> -		*any_dirty = false;
>
>  	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
>  	VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio);
>  	VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio);
> +	VM_WARN_ON(virt_addr_valid(ptentp) && PageTable(virt_to_page(ptentp)));

Hm, so if !virt_addr_valid(ptentp) we're ok? :P I also think a quick comment
here would help; the commit message explains it, but glancing at this I'd be
confused.

Something like:

/* Ensure this is a pointer to a copy not a pointer into a page table. */

>
>  	/* Limit max_nr to the actual remaining PFNs in the folio we could batch. */
>  	max_nr = min_t(unsigned long, max_nr,
> @@ -279,12 +284,12 @@ static inline unsigned int folio_pte_batch_ext(struct folio *folio,
>  		if (!pte_same(__pte_batch_clear_ignored(pte, flags), expected_pte))
>  			break;
>
> -		if (any_writable)
> -			*any_writable |= pte_write(pte);
> -		if (any_young)
> -			*any_young |= pte_young(pte);
> -		if (any_dirty)
> -			*any_dirty |= pte_dirty(pte);
> +		if (flags & FPB_MERGE_WRITE)
> +			any_writable |= pte_write(pte);
> +		if (flags & FPB_MERGE_YOUNG_DIRTY) {
> +			any_young |= pte_young(pte);
> +			any_dirty |= pte_dirty(pte);
> +		}
>
>  		cur_nr = pte_batch_hint(ptep, pte);
>  		expected_pte = pte_advance_pfn(expected_pte, cur_nr);
> @@ -292,6 +297,13 @@ static inline unsigned int folio_pte_batch_ext(struct folio *folio,
>  		nr += cur_nr;
>  	}
>
> +	if (any_writable)
> +		*ptentp = pte_mkwrite(*ptentp, vma);
> +	if (any_young)
> +		*ptentp = pte_mkyoung(*ptentp);
> +	if (any_dirty)
> +		*ptentp = pte_mkdirty(*ptentp);
> +
>  	return min(nr, max_nr);
>  }
>
> diff --git a/mm/madvise.c b/mm/madvise.c
> index 9b9c35a398ed0..dce8f5e8555cb 100644
> --- a/mm/madvise.c
> +++ b/mm/madvise.c
> @@ -344,13 +344,12 @@ static inline bool can_do_file_pageout(struct vm_area_struct *vma)
>
>  static inline int madvise_folio_pte_batch(unsigned long addr, unsigned long end,
>  					  struct folio *folio, pte_t *ptep,
> -					  pte_t pte, bool *any_young,
> -					  bool *any_dirty)
> +					  pte_t *ptentp)
>  {
>  	int max_nr = (end - addr) / PAGE_SIZE;
>
> -	return folio_pte_batch_ext(folio, ptep, pte, max_nr, 0, NULL,
> -				   any_young, any_dirty);
> +	return folio_pte_batch_ext(folio, NULL, ptep, ptentp, max_nr,
> +				   FPB_MERGE_YOUNG_DIRTY);
>  }
>
>  static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
> @@ -488,13 +487,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
>  		 * next pte in the range.
>  		 */
>  		if (folio_test_large(folio)) {
> -			bool any_young;
> -
> -			nr = madvise_folio_pte_batch(addr, end, folio, pte,
> -						     ptent, &any_young, NULL);
> -			if (any_young)
> -				ptent = pte_mkyoung(ptent);
> -
> +			nr = madvise_folio_pte_batch(addr, end, folio, pte, &ptent);
>  			if (nr < folio_nr_pages(folio)) {
>  				int err;
>
> @@ -724,11 +717,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
>  		 * next pte in the range.
>  		 */
>  		if (folio_test_large(folio)) {
> -			bool any_young, any_dirty;
> -
> -			nr = madvise_folio_pte_batch(addr, end, folio, pte,
> -						     ptent, &any_young, &any_dirty);
> -
> +			nr = madvise_folio_pte_batch(addr, end, folio, pte, &ptent);
>  			if (nr < folio_nr_pages(folio)) {
>  				int err;
>
> @@ -753,11 +742,6 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
>  					nr = 0;
>  				continue;
>  			}
> -
> -			if (any_young)
> -				ptent = pte_mkyoung(ptent);
> -			if (any_dirty)
> -				ptent = pte_mkdirty(ptent);
>  		}
>
>  		if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
> diff --git a/mm/memory.c b/mm/memory.c
> index 43d35d6675f2e..985d09bee44fd 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -972,10 +972,9 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
>  		 pte_t *dst_pte, pte_t *src_pte, pte_t pte, unsigned long addr,
>  		 int max_nr, int *rss, struct folio **prealloc)
>  {
> +	fpb_t flags = FPB_MERGE_WRITE;
>  	struct page *page;
>  	struct folio *folio;
> -	bool any_writable;
> -	fpb_t flags = 0;
>  	int err, nr;
>
>  	page = vm_normal_page(src_vma, addr, pte);
> @@ -995,8 +994,7 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
>  		if (vma_soft_dirty_enabled(src_vma))
>  			flags |= FPB_HONOR_SOFT_DIRTY;
>
> -		nr = folio_pte_batch_ext(folio, src_pte, pte, max_nr, flags,
> -				     &any_writable, NULL, NULL);
> +		nr = folio_pte_batch_ext(folio, src_vma, src_pte, &pte, max_nr, flags);
>  		folio_ref_add(folio, nr);
>  		if (folio_test_anon(folio)) {
>  			if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page,
> @@ -1010,8 +1008,6 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
>  			folio_dup_file_rmap_ptes(folio, page, nr, dst_vma);
>  			rss[mm_counter_file(folio)] += nr;
>  		}
> -		if (any_writable)
> -			pte = pte_mkwrite(pte, src_vma);
>  		__copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte,
>  				    addr, nr);
>  		return nr;
> diff --git a/mm/util.c b/mm/util.c
> index d29dcc135ad28..19d1a5814fac7 100644
> --- a/mm/util.c
> +++ b/mm/util.c
> @@ -1197,6 +1197,6 @@ EXPORT_SYMBOL(compat_vma_mmap_prepare);
>  unsigned int folio_pte_batch(struct folio *folio, pte_t *ptep, pte_t pte,
>  		unsigned int max_nr)
>  {
> -	return folio_pte_batch_ext(folio, ptep, pte, max_nr, 0, NULL, NULL, NULL);
> +	return folio_pte_batch_ext(folio, NULL, ptep, &pte, max_nr, 0);
>  }
>  #endif /* CONFIG_MMU */
> --
> 2.49.0
>
