Message-ID: <20240229232211.161961-3-samuel.holland@sifive.com>
Date: Thu, 29 Feb 2024 15:21:43 -0800
From: Samuel Holland <samuel.holland@sifive.com>
To: Palmer Dabbelt <palmer@dabbelt.com>,
	linux-riscv@lists.infradead.org
Cc: linux-kernel@vger.kernel.org,
	linux-mm@kvack.org,
	Alexandre Ghiti <alexghiti@rivosinc.com>,
	Jisheng Zhang <jszhang@kernel.org>,
	Yunhui Cui <cuiyunhui@bytedance.com>,
	Samuel Holland <samuel.holland@sifive.com>
Subject: [PATCH v5 02/13] riscv: Factor out page table TLB synchronization

The logic is the same for all page table levels, so factor it out into
a common helper. See commit 69be3fb111e7 ("riscv: enable
MMU_GATHER_RCU_TABLE_FREE for SMP && MMU") for an explanation of why
the freeing strategy depends on riscv_use_ipi_for_rfence().

Signed-off-by: Samuel Holland <samuel.holland@sifive.com>
---
Changes in v5:
- New patch for v5
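
As a condensed, illustrative sketch of the resulting pattern (reviewer
note only, not part of the commit message; the authoritative code is in
the diff below, and the comment paraphrases the rationale from commit
69be3fb111e7 rather than quoting it):

static inline void riscv_tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
{
	/*
	 * When remote fences are sent as IPIs, the IPI broadcast during
	 * the TLB flush already synchronizes with lockless page table
	 * walkers, so the table page can take the normal batched free
	 * path. Otherwise, freeing must be deferred until an RCU grace
	 * period has elapsed.
	 */
	if (riscv_use_ipi_for_rfence())
		tlb_remove_page_ptdesc(tlb, pt);
	else
		tlb_remove_ptdesc(tlb, pt);
}

Each __{pud,p4d,pmd,pte}_free_tlb() then reduces to its destructor call
(where one exists) plus a single call to this helper.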

 arch/riscv/include/asm/pgalloc.h | 31 +++++++++++++------------------
 1 file changed, 13 insertions(+), 18 deletions(-)

diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index deaf971253a2..87468f67951a 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -88,6 +88,14 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 	return NULL;
 }
 
+static inline void riscv_tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
+{
+	if (riscv_use_ipi_for_rfence())
+		tlb_remove_page_ptdesc(tlb, pt);
+	else
+		tlb_remove_ptdesc(tlb, pt);
+}
+
 #define pud_free pud_free
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 {
@@ -102,10 +110,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 		struct ptdesc *ptdesc = virt_to_ptdesc(pud);
 
 		pagetable_pud_dtor(ptdesc);
-		if (riscv_use_ipi_for_rfence())
-			tlb_remove_page_ptdesc(tlb, ptdesc);
-		else
-			tlb_remove_ptdesc(tlb, ptdesc);
+		riscv_tlb_remove_ptdesc(tlb, ptdesc);
 	}
 }
 
@@ -139,12 +144,8 @@ static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
 static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
 				  unsigned long addr)
 {
-	if (pgtable_l5_enabled) {
-		if (riscv_use_ipi_for_rfence())
-			tlb_remove_page_ptdesc(tlb, virt_to_ptdesc(p4d));
-		else
-			tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
-	}
+	if (pgtable_l5_enabled)
+		riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
 }
 #endif /* __PAGETABLE_PMD_FOLDED */
 
@@ -176,10 +177,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 	struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
 
 	pagetable_pmd_dtor(ptdesc);
-	if (riscv_use_ipi_for_rfence())
-		tlb_remove_page_ptdesc(tlb, ptdesc);
-	else
-		tlb_remove_ptdesc(tlb, ptdesc);
+	riscv_tlb_remove_ptdesc(tlb, ptdesc);
 }
 #endif /* __PAGETABLE_PMD_FOLDED */
 
@@ -190,10 +188,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 	struct ptdesc *ptdesc = page_ptdesc(pte);
 
 	pagetable_pte_dtor(ptdesc);
-	if (riscv_use_ipi_for_rfence())
-		tlb_remove_page_ptdesc(tlb, ptdesc);
-	else
-		tlb_remove_ptdesc(tlb, ptdesc);
+	riscv_tlb_remove_ptdesc(tlb, ptdesc);
 }
 #endif /* CONFIG_MMU */
 
--
2.43.1