Message-ID: <20250625063753.77511-3-ajd@linux.ibm.com>
Date: Wed, 25 Jun 2025 16:37:42 +1000
From: Andrew Donnellan <ajd@...ux.ibm.com>
To: linuxppc-dev@...ts.ozlabs.org, pasha.tatashin@...een.com,
akpm@...ux-foundation.org
Cc: linux-kernel@...r.kernel.org, x86@...nel.org,
linux-riscv@...ts.infradead.org, linux-arm-kernel@...ts.infradead.org,
linux-mm@...ck.org, sweettea-kernel@...miny.me,
christophe.leroy@...roup.eu, mingo@...nel.org
Subject: [PATCH v15 02/13] arm64/mm: Add addr parameter to __ptep_get_and_clear_anysz()
To provide support for page table check on powerpc, we need to reinstate the
address parameter in several functions, including
page_table_check_{pte,pmd,pud}_clear().

In preparation for this, add the addr parameter to arm64's
__ptep_get_and_clear_anysz() and change its call sites accordingly. While this
parameter won't (at present) be used on arm64, it keeps the usage of the page
table check interfaces consistent.
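
For reference, the reinstated prototypes are expected to look roughly like
the sketch below (the exact definitions land in a later patch in this series;
the addr argument is unused on arm64 but will be used by powerpc):

	/* Sketch only -- see the page table check patches for the
	 * actual definitions. */
	void page_table_check_pte_clear(struct mm_struct *mm,
					unsigned long addr, pte_t pte);
	void page_table_check_pmd_clear(struct mm_struct *mm,
					unsigned long addr, pmd_t pmd);
	void page_table_check_pud_clear(struct mm_struct *mm,
					unsigned long addr, pud_t pud);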
Signed-off-by: Andrew Donnellan <ajd@...ux.ibm.com>
---
v15: new patch
---
arch/arm64/include/asm/pgtable.h | 5 +++--
arch/arm64/mm/hugetlbpage.c | 7 ++++---
2 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index acbcb5e883ce..75572a08c012 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1381,6 +1381,7 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */
static inline pte_t __ptep_get_and_clear_anysz(struct mm_struct *mm,
+ unsigned long address,
pte_t *ptep,
unsigned long pgsize)
{
@@ -1408,7 +1409,7 @@ static inline pte_t __ptep_get_and_clear_anysz(struct mm_struct *mm,
static inline pte_t __ptep_get_and_clear(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
{
- return __ptep_get_and_clear_anysz(mm, ptep, PAGE_SIZE);
+ return __ptep_get_and_clear_anysz(mm, address, ptep, PAGE_SIZE);
}
static inline void __clear_full_ptes(struct mm_struct *mm, unsigned long addr,
@@ -1447,7 +1448,7 @@ static inline pte_t __get_and_clear_full_ptes(struct mm_struct *mm,
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long address, pmd_t *pmdp)
{
- return pte_pmd(__ptep_get_and_clear_anysz(mm, (pte_t *)pmdp, PMD_SIZE));
+ return pte_pmd(__ptep_get_and_clear_anysz(mm, address, (pte_t *)pmdp, PMD_SIZE));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 1003b5020752..bcc28031eb7a 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -159,11 +159,12 @@ static pte_t get_clear_contig(struct mm_struct *mm,
pte_t pte, tmp_pte;
bool present;
- pte = __ptep_get_and_clear_anysz(mm, ptep, pgsize);
+ pte = __ptep_get_and_clear_anysz(mm, addr, ptep, pgsize);
present = pte_present(pte);
while (--ncontig) {
ptep++;
- tmp_pte = __ptep_get_and_clear_anysz(mm, ptep, pgsize);
+ addr += pgsize;
+ tmp_pte = __ptep_get_and_clear_anysz(mm, addr, ptep, pgsize);
if (present) {
if (pte_dirty(tmp_pte))
pte = pte_mkdirty(pte);
@@ -207,7 +208,7 @@ static void clear_flush(struct mm_struct *mm,
unsigned long i, saddr = addr;
for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
- __ptep_get_and_clear_anysz(mm, ptep, pgsize);
+ __ptep_get_and_clear_anysz(mm, addr, ptep, pgsize);
if (mm == &init_mm)
flush_tlb_kernel_range(saddr, addr);
--
2.49.0