Message-ID: <20230109205336.3665937-24-surenb@google.com>
Date: Mon, 9 Jan 2023 12:53:18 -0800
From: Suren Baghdasaryan <surenb@...gle.com>
To: akpm@...ux-foundation.org
Cc: michel@...pinasse.org, jglisse@...gle.com, mhocko@...e.com,
vbabka@...e.cz, hannes@...xchg.org, mgorman@...hsingularity.net,
dave@...olabs.net, willy@...radead.org, liam.howlett@...cle.com,
peterz@...radead.org, ldufour@...ux.ibm.com,
laurent.dufour@...ibm.com, paulmck@...nel.org, luto@...nel.org,
songliubraving@...com, peterx@...hat.com, david@...hat.com,
dhowells@...hat.com, hughd@...gle.com, bigeasy@...utronix.de,
kent.overstreet@...ux.dev, punit.agrawal@...edance.com,
lstoakes@...il.com, peterjung1337@...il.com, rientjes@...gle.com,
axelrasmussen@...gle.com, joelaf@...gle.com, minchan@...gle.com,
jannh@...gle.com, shakeelb@...gle.com, tatashin@...gle.com,
edumazet@...gle.com, gthelen@...gle.com, gurua@...gle.com,
arjunroy@...gle.com, soheil@...gle.com, hughlynch@...gle.com,
leewalsh@...gle.com, posk@...gle.com, linux-mm@...ck.org,
linux-arm-kernel@...ts.infradead.org,
linuxppc-dev@...ts.ozlabs.org, x86@...nel.org,
linux-kernel@...r.kernel.org, kernel-team@...roid.com,
surenb@...gle.com
Subject: [PATCH 23/41] mm: write-lock VMAs before removing them from VMA tree

Write-locking VMAs before isolating them from the VMA tree ensures that
page fault handlers don't operate on VMAs that are being removed.
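
The vma_write_lock() calls added here rely on the per-VMA lock
infrastructure introduced earlier in this series. As a rough sketch of
that scheme (illustrative only, based on the earlier patches; the
vma->lock rwsem and the vm_lock_seq/mm_lock_seq fields are assumed from
there and may differ in detail):

	static inline void vma_write_lock(struct vm_area_struct *vma)
	{
		int mm_lock_seq;

		/* Writers are already serialized by mmap_lock held for write. */
		mmap_assert_write_locked(vma->vm_mm);

		/* Already write-locked in this mmap_write_lock section? */
		mm_lock_seq = READ_ONCE(vma->vm_mm->mm_lock_seq);
		if (vma->vm_lock_seq == mm_lock_seq)
			return;

		/* Waits for page fault handlers holding vma->lock for read. */
		down_write(&vma->lock);
		vma->vm_lock_seq = mm_lock_seq;
		up_write(&vma->lock);
	}

A page fault handler takes vma->lock for read and rechecks vm_lock_seq
against mm_lock_seq, so once vma_write_lock() returns, no fault handler
can still be using the VMA and it is safe to take it out of the tree.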

Signed-off-by: Suren Baghdasaryan <surenb@...gle.com>
---
mm/mmap.c | 2 ++
mm/nommu.c | 5 +++++
2 files changed, 7 insertions(+)

diff --git a/mm/mmap.c b/mm/mmap.c
index da1908730828..be289e0b693b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -448,6 +448,7 @@ void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas)
*/
void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas)
{
+ vma_write_lock(vma);
trace_vma_mas_szero(mas->tree, vma->vm_start, vma->vm_end - 1);
mas->index = vma->vm_start;
mas->last = vma->vm_end - 1;
@@ -2300,6 +2301,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
static inline int munmap_sidetree(struct vm_area_struct *vma,
struct ma_state *mas_detach)
{
+ vma_write_lock(vma);
mas_set_range(mas_detach, vma->vm_start, vma->vm_end - 1);
if (mas_store_gfp(mas_detach, vma, GFP_KERNEL))
return -ENOMEM;
diff --git a/mm/nommu.c b/mm/nommu.c
index b3154357ced5..7ae91337ef14 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -552,6 +552,7 @@ void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas)
void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas)
{
+ vma_write_lock(vma);
mas->index = vma->vm_start;
mas->last = vma->vm_end - 1;
mas_store_prealloc(mas, NULL);
@@ -1551,6 +1552,10 @@ void exit_mmap(struct mm_struct *mm)
mmap_write_lock(mm);
for_each_vma(vmi, vma) {
cleanup_vma_from_mm(vma);
+ /*
+ * No need to lock VMA because this is the only mm user and no
+ * page fault handlers can race with it.
+ */
delete_vma(mm, vma);
cond_resched();
}
--
2.39.0