Message-ID: <CA+CK2bA-3p1iy9ryaQSthV0MhpxzuMVK91F1YF45qw90PUtG3g@mail.gmail.com>
Date: Thu, 18 Dec 2025 12:49:03 -0500
From: Pasha Tatashin <pasha.tatashin@...een.com>
To: Andrew Donnellan <ajd@...ux.ibm.com>
Cc: linux-mm@...ck.org, linuxppc-dev@...ts.ozlabs.org,
Andrew Morton <akpm@...ux-foundation.org>, Madhavan Srinivasan <maddy@...ux.ibm.com>,
Nicholas Piggin <npiggin@...il.com>, Rohan McLure <rmclure@...dia.com>,
Christophe Leroy <chleroy@...nel.org>, Alexandre Ghiti <alex@...ti.fr>, x86@...nel.org,
Nicholas Miehlbradt <nicholas@...ux.ibm.com>, Sweet Tea Dorminy <sweettea-kernel@...miny.me>,
Andrew Donnellan <andrew+kernel@...nellan.id.au>, Srish Srinivasan <ssrish@...ux.ibm.com>,
linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
linux-riscv@...ts.infradead.org
Subject: Re: [PATCH v18 01/12] arm64/mm: Add addr parameter to __set_ptes_anysz()
On Thu, Dec 18, 2025 at 12:10 PM Andrew Donnellan <ajd@...ux.ibm.com> wrote:
>
> To provide support for page table check on powerpc, we need to reinstate the
> address parameter in several functions, including
> page_table_check_{ptes,pmds,puds}_set().
>
> In preparation for this, add the addr parameter to arm64's __set_ptes_anysz()
> and change its callsites accordingly.
>
> Signed-off-by: Andrew Donnellan <ajd@...ux.ibm.com>
Reviewed-by: Pasha Tatashin <pasha.tatashin@...een.com>
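
For context: per the commit message, the page table check helpers are expected to
get the address argument back later in this series. The exact prototypes below are
an assumption (based on the current helpers plus a reinstated addr), not taken from
this patch:

    /* Assumed prototypes -- the real definitions land in a later patch. */
    void page_table_check_ptes_set(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte, unsigned int nr);
    void page_table_check_pmds_set(struct mm_struct *mm, unsigned long addr,
                                   pmd_t *pmdp, pmd_t pmd, unsigned int nr);
    void page_table_check_puds_set(struct mm_struct *mm, unsigned long addr,
                                   pud_t *pudp, pud_t pud, unsigned int nr);

Plumbing addr through __set_ptes_anysz() here keeps the arm64 callers ready for
that change without any functional difference in this patch.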
> ---
> v15: new patch
> v16: rebase
> ---
> arch/arm64/include/asm/pgtable.h | 19 ++++++++-----------
> arch/arm64/mm/hugetlbpage.c | 10 +++++-----
> 2 files changed, 13 insertions(+), 16 deletions(-)
>
> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> index 445e18e92221c98eef717888aeac71d1d6b1da06..52f3ea07427cef399e68bea0dbab39d03ea83060 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -673,8 +673,8 @@ static inline pgprot_t pud_pgprot(pud_t pud)
> return __pgprot(pud_val(pfn_pud(pfn, __pgprot(0))) ^ pud_val(pud));
> }
>
> -static inline void __set_ptes_anysz(struct mm_struct *mm, pte_t *ptep,
> - pte_t pte, unsigned int nr,
> +static inline void __set_ptes_anysz(struct mm_struct *mm, unsigned long addr,
> + pte_t *ptep, pte_t pte, unsigned int nr,
> unsigned long pgsize)
> {
> unsigned long stride = pgsize >> PAGE_SHIFT;
> @@ -709,26 +709,23 @@ static inline void __set_ptes_anysz(struct mm_struct *mm, pte_t *ptep,
> __set_pte_complete(pte);
> }
>
> -static inline void __set_ptes(struct mm_struct *mm,
> - unsigned long __always_unused addr,
> +static inline void __set_ptes(struct mm_struct *mm, unsigned long addr,
> pte_t *ptep, pte_t pte, unsigned int nr)
> {
> - __set_ptes_anysz(mm, ptep, pte, nr, PAGE_SIZE);
> + __set_ptes_anysz(mm, addr, ptep, pte, nr, PAGE_SIZE);
> }
>
> -static inline void __set_pmds(struct mm_struct *mm,
> - unsigned long __always_unused addr,
> +static inline void __set_pmds(struct mm_struct *mm, unsigned long addr,
> pmd_t *pmdp, pmd_t pmd, unsigned int nr)
> {
> - __set_ptes_anysz(mm, (pte_t *)pmdp, pmd_pte(pmd), nr, PMD_SIZE);
> + __set_ptes_anysz(mm, addr, (pte_t *)pmdp, pmd_pte(pmd), nr, PMD_SIZE);
> }
> #define set_pmd_at(mm, addr, pmdp, pmd) __set_pmds(mm, addr, pmdp, pmd, 1)
>
> -static inline void __set_puds(struct mm_struct *mm,
> - unsigned long __always_unused addr,
> +static inline void __set_puds(struct mm_struct *mm, unsigned long addr,
> pud_t *pudp, pud_t pud, unsigned int nr)
> {
> - __set_ptes_anysz(mm, (pte_t *)pudp, pud_pte(pud), nr, PUD_SIZE);
> + __set_ptes_anysz(mm, addr, (pte_t *)pudp, pud_pte(pud), nr, PUD_SIZE);
> }
> #define set_pud_at(mm, addr, pudp, pud) __set_puds(mm, addr, pudp, pud, 1)
>
> diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
> index 1d90a7e753336d86314fee0f753e8779d6a6bc12..1003b502075208d4252c27ffdacb1aaf4928639b 100644
> --- a/arch/arm64/mm/hugetlbpage.c
> +++ b/arch/arm64/mm/hugetlbpage.c
> @@ -225,8 +225,8 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
> ncontig = num_contig_ptes(sz, &pgsize);
>
> if (!pte_present(pte)) {
> - for (i = 0; i < ncontig; i++, ptep++)
> - __set_ptes_anysz(mm, ptep, pte, 1, pgsize);
> + for (i = 0; i < ncontig; i++, ptep++, addr += pgsize)
> + __set_ptes_anysz(mm, addr, ptep, pte, 1, pgsize);
> return;
> }
>
> @@ -234,7 +234,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
> if (pte_cont(pte) && pte_valid(__ptep_get(ptep)))
> clear_flush(mm, addr, ptep, pgsize, ncontig);
>
> - __set_ptes_anysz(mm, ptep, pte, ncontig, pgsize);
> + __set_ptes_anysz(mm, addr, ptep, pte, ncontig, pgsize);
> }
>
> pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
> @@ -449,7 +449,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
> if (pte_young(orig_pte))
> pte = pte_mkyoung(pte);
>
> - __set_ptes_anysz(mm, ptep, pte, ncontig, pgsize);
> + __set_ptes_anysz(mm, addr, ptep, pte, ncontig, pgsize);
> return 1;
> }
>
> @@ -473,7 +473,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
> pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
> pte = pte_wrprotect(pte);
>
> - __set_ptes_anysz(mm, ptep, pte, ncontig, pgsize);
> + __set_ptes_anysz(mm, addr, ptep, pte, ncontig, pgsize);
> }
>
> pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
>
> --
> 2.52.0
>
>