Message-ID: <20250716012611.10369-3-anthony.yznaga@oracle.com>
Date: Tue, 15 Jul 2025 18:26:10 -0700
From: Anthony Yznaga <anthony.yznaga@...cle.com>
To: davem@...emloft.net, andreas@...sler.com, arnd@...db.de,
muchun.song@...ux.dev, osalvador@...e.de, akpm@...ux-foundation.org,
david@...hat.com, lorenzo.stoakes@...cle.com, Liam.Howlett@...cle.com,
vbabka@...e.cz, rppt@...nel.org, surenb@...gle.com, mhocko@...e.com
Cc: linux-mm@...ck.org, sparclinux@...r.kernel.org, linux-arch@...r.kernel.org,
linux-kernel@...r.kernel.org, alexghiti@...osinc.com,
agordeev@...ux.ibm.com, anshuman.khandual@....com,
christophe.leroy@...roup.eu, ryan.roberts@....com, will@...nel.org
Subject: [PATCH 2/3] mm: remove call to hugetlb_free_pgd_range()

With the removal of the last arch-specific implementation of
hugetlb_free_pgd_range(), hugetlb VMAs no longer need special
handling when freeing page tables.

Signed-off-by: Anthony Yznaga <anthony.yznaga@...cle.com>
---
mm/memory.c | 42 ++++++++++++++++++------------------------
 1 file changed, 18 insertions(+), 24 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index b0cda5aab398..49792af5b7d0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -380,32 +380,26 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
 			vma_start_write(vma);
 		unlink_anon_vmas(vma);
 
-		if (is_vm_hugetlb_page(vma)) {
-			unlink_file_vma(vma);
-			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
-				floor, next ? next->vm_start : ceiling);
-		} else {
-			unlink_file_vma_batch_init(&vb);
-			unlink_file_vma_batch_add(&vb, vma);
+		unlink_file_vma_batch_init(&vb);
+		unlink_file_vma_batch_add(&vb, vma);
 
-			/*
-			 * Optimization: gather nearby vmas into one call down
-			 */
-			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
-			       && !is_vm_hugetlb_page(next)) {
-				vma = next;
-				next = mas_find(mas, ceiling - 1);
-				if (unlikely(xa_is_zero(next)))
-					next = NULL;
-				if (mm_wr_locked)
-					vma_start_write(vma);
-				unlink_anon_vmas(vma);
-				unlink_file_vma_batch_add(&vb, vma);
-			}
-			unlink_file_vma_batch_final(&vb);
-			free_pgd_range(tlb, addr, vma->vm_end,
-				floor, next ? next->vm_start : ceiling);
+		/*
+		 * Optimization: gather nearby vmas into one call down
+		 */
+		while (next && next->vm_start <= vma->vm_end + PMD_SIZE) {
+			vma = next;
+			next = mas_find(mas, ceiling - 1);
+			if (unlikely(xa_is_zero(next)))
+				next = NULL;
+			if (mm_wr_locked)
+				vma_start_write(vma);
+			unlink_anon_vmas(vma);
+			unlink_file_vma_batch_add(&vb, vma);
 		}
+		unlink_file_vma_batch_final(&vb);
+
+		free_pgd_range(tlb, addr, vma->vm_end,
+			       floor, next ? next->vm_start : ceiling);
 		vma = next;
 	} while (vma);
 }
--
2.47.1