Message-Id: <20221030213045.335680-1-peterx@redhat.com>
Date: Sun, 30 Oct 2022 17:30:45 -0400
From: Peter Xu <peterx@...hat.com>
To: linux-mm@...ck.org, linux-kernel@...r.kernel.org
Cc: James Houghton <jthoughton@...gle.com>,
Mike Kravetz <mike.kravetz@...cle.com>,
David Hildenbrand <david@...hat.com>,
Andrea Arcangeli <aarcange@...hat.com>,
Rik van Riel <riel@...riel.com>, peterx@...hat.com,
Andrew Morton <akpm@...ux-foundation.org>,
Muchun Song <songmuchun@...edance.com>,
Miaohe Lin <linmiaohe@...wei.com>,
Nadav Amit <nadav.amit@...il.com>
Subject: [PATCH RFC 10/10] mm/hugetlb: Comment the remaining huge_pte_offset() call sites

Make sure that we cover all the existing huge_pte_offset() callers,
and comment on why each of them is safe with regard to pmd unsharing.
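
To illustrate, here is a minimal sketch of the pattern these comments
document (the walker function itself is hypothetical and not part of
this patch; the helpers are the real ones used in the hunks below):

/*
 * Hypothetical example: hold the hugetlb vma lock across
 * huge_pte_offset() and any dereference of the returned pointer.
 * The vma lock serializes against huge_pmd_unshare(), so the
 * pgtable page returned here cannot be unshared and freed from
 * under us while the lock is held.
 */
static pte_t sketch_read_hugetlb_pte(struct vm_area_struct *vma,
                                     unsigned long addr)
{
        struct hstate *h = hstate_vma(vma);
        pte_t *ptep, pte = __pte(0);

        hugetlb_vma_lock_read(vma);
        ptep = huge_pte_offset(vma->vm_mm, addr, huge_page_size(h));
        if (ptep)
                pte = huge_ptep_get(ptep);
        hugetlb_vma_unlock_read(vma);

        return pte;
}

Note that once the lock is dropped the value read is only a snapshot;
callers that need a stable pte must re-check it under the pgtable
lock, as the hugetlb_wp() hunks below do with pte_same().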
Signed-off-by: Peter Xu <peterx@...hat.com>
---
mm/hugetlb.c | 13 +++++++++++++
1 file changed, 13 insertions(+)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6d336d286394..270bfc578115 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4822,6 +4822,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
last_addr_mask = hugetlb_mask_last_page(h);
for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
spinlock_t *src_ptl, *dst_ptl;
+ /* With vma lock held, safe without RCU */
src_pte = huge_pte_offset(src, addr, sz);
if (!src_pte) {
addr |= last_addr_mask;
@@ -5026,6 +5027,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
hugetlb_vma_lock_write(vma);
i_mmap_lock_write(mapping);
for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
+ /* With vma lock held, safe without RCU */
src_pte = huge_pte_offset(mm, old_addr, sz);
if (!src_pte) {
old_addr |= last_addr_mask;
@@ -5097,6 +5099,7 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
last_addr_mask = hugetlb_mask_last_page(h);
address = start;
for (; address < end; address += sz) {
+ /* With vma lock held, safe without RCU */
ptep = huge_pte_offset(mm, address, sz);
if (!ptep) {
address |= last_addr_mask;
@@ -5402,6 +5405,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
mutex_lock(&hugetlb_fault_mutex_table[hash]);
hugetlb_vma_lock_read(vma);
spin_lock(ptl);
+ /* With vma lock held, safe without RCU */
ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
if (likely(ptep &&
pte_same(huge_ptep_get(ptep), pte)))
@@ -5440,6 +5444,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
* before the page tables are altered
*/
spin_lock(ptl);
+ /* With vma lock (and even pgtable lock) held, safe without RCU */
ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
/* Break COW or unshare */
@@ -6511,6 +6516,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
last_addr_mask = hugetlb_mask_last_page(h);
for (; address < end; address += psize) {
spinlock_t *ptl;
+ /* With vma lock held, safe without RCU */
ptep = huge_pte_offset(mm, address, psize);
if (!ptep) {
address |= last_addr_mask;
@@ -7060,7 +7066,13 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
saddr = page_table_shareable(svma, vma, addr, idx);
if (saddr) {
+ /*
+ * huge_pmd_share() (or rather, its sole caller,
+ * huge_pte_alloc()) always takes the hugetlb vma
+ * lock, so it's always safe to walk the pgtable of
+ * the process, even without RCU.
+ */
spte = huge_pte_offset(svma->vm_mm, saddr,
vma_mmu_pagesize(svma));
if (spte) {
get_page(virt_to_page(spte));
@@ -7420,6 +7433,7 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
hugetlb_vma_lock_write(vma);
i_mmap_lock_write(vma->vm_file->f_mapping);
for (address = start; address < end; address += PUD_SIZE) {
+ /* With vma lock held, safe without RCU */
ptep = huge_pte_offset(mm, address, sz);
if (!ptep)
continue;
--
2.37.3