Message-Id: <e23662d6be464b6ed89d6abde13df1b4694f6583.1724226076.git.zhengqi.arch@bytedance.com>
Date: Wed, 21 Aug 2024 16:18:53 +0800
From: Qi Zheng <zhengqi.arch@...edance.com>
To: david@...hat.com,
hughd@...gle.com,
willy@...radead.org,
muchun.song@...ux.dev,
vbabka@...nel.org,
akpm@...ux-foundation.org,
rppt@...nel.org,
vishal.moola@...il.com,
peterx@...hat.com,
ryan.roberts@....com
Cc: linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
linux-arm-kernel@...ts.infradead.org,
linuxppc-dev@...ts.ozlabs.org,
Qi Zheng <zhengqi.arch@...edance.com>
Subject: [PATCH 10/14] mm: page_vma_mapped_walk: map_pte() use pte_offset_map_maywrite_nolock()
In the caller of map_pte(), we may modify pvmw->pte after acquiring
pvmw->ptl, so convert it to use pte_offset_map_maywrite_nolock(). At
this point the write lock of mmap_lock is not held, and no pte_same()
check is performed after pvmw->ptl is taken, so we must record pmdval
and recheck it with pmd_same() under the ptl to ensure the stability
of pvmw->pmd.
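
The resulting lock-and-recheck pattern in map_pte() looks like this in
isolation (a minimal sketch of the change below;
pte_offset_map_maywrite_nolock() is introduced earlier in this series
and hands back the pmd value it sampled before mapping the PTE page):

again:
	pvmw->pte = pte_offset_map_maywrite_nolock(pvmw->vma->vm_mm, pvmw->pmd,
						   pvmw->address, &pmdval, ptlp);
	if (!pvmw->pte)
		return false;
	...
	spin_lock(pvmw->ptl);
	/* The PTE page may have been freed or replaced while unlocked. */
	if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pvmw->pmd)))) {
		spin_unlock(pvmw->ptl);
		goto again;
	}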
Signed-off-by: Qi Zheng <zhengqi.arch@...edance.com>
---
mm/page_vma_mapped.c | 24 ++++++++++++++++++++----
1 file changed, 20 insertions(+), 4 deletions(-)
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index ae5cc42aa2087..da806f3a5047d 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -13,9 +13,11 @@ static inline bool not_found(struct page_vma_mapped_walk *pvmw)
return false;
}
-static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
+static bool map_pte(struct page_vma_mapped_walk *pvmw, pmd_t *pmdvalp,
+ spinlock_t **ptlp)
{
pte_t ptent;
+ pmd_t pmdval;
if (pvmw->flags & PVMW_SYNC) {
/* Use the stricter lookup */
@@ -25,6 +27,7 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
return !!pvmw->pte;
}
+again:
/*
* It is important to return the ptl corresponding to pte,
* in case *pvmw->pmd changes underneath us; so we need to
@@ -32,10 +35,11 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
* proceeds to loop over next ptes, and finds a match later.
* Though, in most cases, page lock already protects this.
*/
- pvmw->pte = pte_offset_map_nolock(pvmw->vma->vm_mm, pvmw->pmd,
- pvmw->address, ptlp);
+ pvmw->pte = pte_offset_map_maywrite_nolock(pvmw->vma->vm_mm, pvmw->pmd,
+ pvmw->address, &pmdval, ptlp);
if (!pvmw->pte)
return false;
+ *pmdvalp = pmdval;
ptent = ptep_get(pvmw->pte);
@@ -69,6 +73,12 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
}
pvmw->ptl = *ptlp;
spin_lock(pvmw->ptl);
+
+ if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pvmw->pmd)))) {
+ spin_unlock(pvmw->ptl);
+ goto again;
+ }
+
return true;
}
@@ -278,7 +288,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
step_forward(pvmw, PMD_SIZE);
continue;
}
- if (!map_pte(pvmw, &ptl)) {
+ if (!map_pte(pvmw, &pmde, &ptl)) {
if (!pvmw->pte)
goto restart;
goto next_pte;
@@ -307,6 +317,12 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
if (!pvmw->ptl) {
pvmw->ptl = ptl;
spin_lock(pvmw->ptl);
+ if (unlikely(!pmd_same(pmde, pmdp_get_lockless(pvmw->pmd)))) {
+ pte_unmap_unlock(pvmw->pte, pvmw->ptl);
+ pvmw->ptl = NULL;
+ pvmw->pte = NULL;
+ goto restart;
+ }
}
goto this_pte;
} while (pvmw->address < end);
--
2.20.1