Message-ID: <acee52bf-4f19-4e85-8d69-15d6cda54e95@lucifer.local>
Date: Wed, 6 Aug 2025 17:40:28 +0100
From: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
To: Dev Jain <dev.jain@....com>
Cc: akpm@...ux-foundation.org, ryan.roberts@....com, david@...hat.com,
willy@...radead.org, linux-mm@...ck.org, linux-kernel@...r.kernel.org,
catalin.marinas@....com, will@...nel.org, Liam.Howlett@...cle.com,
vbabka@...e.cz, jannh@...gle.com, anshuman.khandual@....com,
peterx@...hat.com, joey.gouly@....com, ioworker0@...il.com,
baohua@...nel.org, kevin.brodsky@....com, quic_zhenhuah@...cinc.com,
christophe.leroy@...roup.eu, yangyicong@...ilicon.com,
linux-arm-kernel@...ts.infradead.org, hughd@...gle.com,
yang@...amperecomputing.com, ziy@...dia.com,
syzbot+57bcc752f0df8bb1365c@...kaller.appspotmail.com
Subject: Re: [PATCH mm-hotfixes-unstable] mm: Pass page directly instead of
using folio_page

On Wed, Aug 06, 2025 at 08:26:11PM +0530, Dev Jain wrote:
> In commit_anon_folio_batch(), we iterate over all pages pointed to by the
> PTE batch, so we need to know the first page of the batch. Currently we
> derive that via folio_page(folio, 0), but that takes us to the first
> (head) page of the folio instead. Our PTE batch may lie in the middle of
> the folio, so we end up operating on the wrong pages.
>
> Bite the bullet and throw away the micro-optimization of reusing the
> folio in favour of code simplicity. Derive the page and the folio in
> change_pte_range(), and pass the page as well to commit_anon_folio_batch()
> to fix the aforementioned issue.
>
> Reported-by: syzbot+57bcc752f0df8bb1365c@...kaller.appspotmail.com
> Fixes: cac1db8c3aad ("mm: optimize mprotect() by PTE batching")
> Signed-off-by: Dev Jain <dev.jain@....com>
This looks reasonable, fixes the problem, and compiles/works on my machine, so:

Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>

This badly needs refactoring, as passing 13 parameters to a function is
ridiculous, but we can do that later.

Let's get this in as a hotfix ASAP.
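
For anyone following along, the failure mode in a nutshell - a minimal
sketch (the scenario is made up, but the helpers are the real ones used
in mm/mprotect.c):

	/*
	 * Suppose the PTE being mprotect()'d maps, say, page 4 of a
	 * 16-page folio (a hypothetical layout for illustration).
	 */
	struct page *page = vm_normal_page(vma, addr, oldpte);	/* page 4 */
	struct folio *folio = page_folio(page);			/* the whole folio */

	/* Pre-fix, commit_anon_folio_batch() derived its start page as: */
	struct page *first_page = folio_page(folio, 0);		/* head page, i.e. page 0 */

	/*
	 * ...and then walked nr_ptes pages from first_page, touching
	 * pages 0..nr_ptes-1 rather than 4..4+nr_ptes-1, i.e. the wrong
	 * pages whenever the batch doesn't start at the folio head.
	 */

With the fix, the page derived in change_pte_range() is passed down, so
the walk starts at the correct page.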
> ---
> mm/mprotect.c | 23 ++++++++++-------------
> 1 file changed, 10 insertions(+), 13 deletions(-)
>
> diff --git a/mm/mprotect.c b/mm/mprotect.c
> index 78bded7acf79..113b48985834 100644
> --- a/mm/mprotect.c
> +++ b/mm/mprotect.c
> @@ -120,9 +120,8 @@ static int mprotect_folio_pte_batch(struct folio *folio, pte_t *ptep,
>
> static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
> pte_t oldpte, pte_t *pte, int target_node,
> - struct folio **foliop)
> + struct folio *folio)
> {
> - struct folio *folio = NULL;
> bool ret = true;
> bool toptier;
> int nid;
> @@ -131,7 +130,6 @@ static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
> if (pte_protnone(oldpte))
> goto skip;
>
> - folio = vm_normal_folio(vma, addr, oldpte);
> if (!folio)
> goto skip;
>
> @@ -173,7 +171,6 @@ static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr,
> folio_xchg_access_time(folio, jiffies_to_msecs(jiffies));
>
> skip:
> - *foliop = folio;
> return ret;
> }
>
> @@ -231,10 +228,9 @@ static int page_anon_exclusive_sub_batch(int start_idx, int max_len,
> * retrieve sub-batches.
> */
> static void commit_anon_folio_batch(struct vm_area_struct *vma,
> - struct folio *folio, unsigned long addr, pte_t *ptep,
> + struct folio *folio, struct page *first_page, unsigned long addr, pte_t *ptep,
> pte_t oldpte, pte_t ptent, int nr_ptes, struct mmu_gather *tlb)
> {
> - struct page *first_page = folio_page(folio, 0);
> bool expected_anon_exclusive;
> int sub_batch_idx = 0;
> int len;
> @@ -251,7 +247,7 @@ static void commit_anon_folio_batch(struct vm_area_struct *vma,
> }
>
> static void set_write_prot_commit_flush_ptes(struct vm_area_struct *vma,
> - struct folio *folio, unsigned long addr, pte_t *ptep,
> + struct folio *folio, struct page *page, unsigned long addr, pte_t *ptep,
> pte_t oldpte, pte_t ptent, int nr_ptes, struct mmu_gather *tlb)
> {
> bool set_write;
> @@ -270,7 +266,7 @@ static void set_write_prot_commit_flush_ptes(struct vm_area_struct *vma,
> /* idx = */ 0, set_write, tlb);
> return;
> }
> - commit_anon_folio_batch(vma, folio, addr, ptep, oldpte, ptent, nr_ptes, tlb);
> + commit_anon_folio_batch(vma, folio, page, addr, ptep, oldpte, ptent, nr_ptes, tlb);
> }
>
> static long change_pte_range(struct mmu_gather *tlb,
> @@ -305,15 +301,19 @@ static long change_pte_range(struct mmu_gather *tlb,
> const fpb_t flags = FPB_RESPECT_SOFT_DIRTY | FPB_RESPECT_WRITE;
> int max_nr_ptes = (end - addr) >> PAGE_SHIFT;
> struct folio *folio = NULL;
> + struct page *page;
> pte_t ptent;
>
> + page = vm_normal_page(vma, addr, oldpte);
> + if (page)
> + folio = page_folio(page);
> /*
> * Avoid trapping faults against the zero or KSM
> * pages. See similar comment in change_huge_pmd.
> */
> if (prot_numa) {
> int ret = prot_numa_skip(vma, addr, oldpte, pte,
> - target_node, &folio);
> + target_node, folio);
> if (ret) {
>
> /* determine batch to skip */
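
Deriving the page up front also makes the page/folio relationship
explicit at the top of the loop. Condensed (a sketch of the post-patch
flow, not literal code):

	page = vm_normal_page(vma, addr, oldpte);	/* page this PTE maps */
	folio = page ? page_folio(page) : NULL;		/* its containing folio */
	...
	/* later, on the anon write-upgrade path: */
	set_write_prot_commit_flush_ptes(vma, folio, page, ...);
		/* -> commit_anon_folio_batch(vma, folio, page, ...),
		 *    which now iterates from 'page', the true first
		 *    page of this batch, not the folio head. */
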
> @@ -323,9 +323,6 @@ static long change_pte_range(struct mmu_gather *tlb,
> }
> }
>
> - if (!folio)
> - folio = vm_normal_folio(vma, addr, oldpte);
> -
> nr_ptes = mprotect_folio_pte_batch(folio, pte, oldpte, max_nr_ptes, flags);
>
> oldpte = modify_prot_start_ptes(vma, addr, pte, nr_ptes);
> @@ -351,7 +348,7 @@ static long change_pte_range(struct mmu_gather *tlb,
> */
> if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) &&
> !pte_write(ptent))
> - set_write_prot_commit_flush_ptes(vma, folio,
> + set_write_prot_commit_flush_ptes(vma, folio, page,
> addr, pte, oldpte, ptent, nr_ptes, tlb);
> else
> prot_commit_flush_ptes(vma, addr, pte, oldpte, ptent,
> --
> 2.30.2
>