Message-ID: <20241112194635.444146-4-surenb@google.com>
Date: Tue, 12 Nov 2024 11:46:33 -0800
From: Suren Baghdasaryan <surenb@...gle.com>
To: akpm@...ux-foundation.org
Cc: willy@...radead.org, liam.howlett@...cle.com, lorenzo.stoakes@...cle.com,
mhocko@...e.com, vbabka@...e.cz, hannes@...xchg.org, mjguzik@...il.com,
oliver.sang@...el.com, mgorman@...hsingularity.net, david@...hat.com,
peterx@...hat.com, oleg@...hat.com, dave@...olabs.net, paulmck@...nel.org,
brauner@...nel.org, dhowells@...hat.com, hdanton@...a.com, hughd@...gle.com,
minchan@...gle.com, jannh@...gle.com, shakeel.butt@...ux.dev,
souravpanda@...gle.com, pasha.tatashin@...een.com, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, kernel-team@...roid.com, surenb@...gle.com
Subject: [PATCH v2 3/5] mm: mark vma as detached until it's added into vma tree

The current implementation does not set the detached flag when a VMA is
first allocated. This does not reflect the real state of the VMA, which
is detached until it is added into the mm's VMA tree. Fix this by
marking new VMAs as detached and clearing the detached flag only after
the VMA has been added into a tree.

Signed-off-by: Suren Baghdasaryan <surenb@...gle.com>
---
 include/linux/mm.h               | 10 +++++++++-
 mm/memory.c                      |  2 +-
 mm/mmap.c                        |  2 ++
 mm/nommu.c                       |  2 ++
 mm/vma.c                         |  3 +++
 tools/testing/vma/vma_internal.h |  3 ++-
 6 files changed, 19 insertions(+), 3 deletions(-)
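
A note for reviewers, not part of the commit message: the intended
lifecycle after this patch is that a VMA starts out detached when it is
allocated and is only marked attached once it has actually been inserted
into the mm's VMA tree. Condensed from the call sites changed below, the
attach sequence looks roughly like this (an illustrative sketch with
error handling omitted; the mm and vmi variables stand in for whatever
the caller has at hand):

	struct vm_area_struct *vma = vm_area_alloc(mm);
					/* vma_init(): vma->detached = true */
	...
	vma_start_write(vma);		/* write-lock the VMA before publishing it */
	vma_iter_store(&vmi, vma);	/* insert into the mm's VMA (maple) tree */
	vma_mark_detached(vma, false);	/* attached, now that it is in the tree */

vma_init() writes vma->detached directly rather than calling
vma_mark_detached(), presumably because vma_mark_detached() may only be
used on a write-locked VMA and a freshly allocated VMA is not locked
yet; the new comment in vma_init() notes this.
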
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a5eb0be3e351..245a85caf4c3 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -812,6 +812,11 @@ static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
 	vma->detached = detached;
 }
 
+static inline bool is_vma_detached(struct vm_area_struct *vma)
+{
+	return vma->detached;
+}
+
 static inline void release_fault_lock(struct vm_fault *vmf)
 {
 	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
@@ -874,7 +879,10 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
 	vma->vm_mm = mm;
 	vma->vm_ops = &vma_dummy_vm_ops;
 	INIT_LIST_HEAD(&vma->anon_vma_chain);
-	vma_mark_detached(vma, false);
+#ifdef CONFIG_PER_VMA_LOCK
+	/* vma is not locked, can't use vma_mark_detached() */
+	vma->detached = true;
+#endif
 	vma_numab_state_init(vma);
 	vma_lock_init(vma);
 }
diff --git a/mm/memory.c b/mm/memory.c
index 209885a4134f..d0197a0c0996 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -6279,7 +6279,7 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
 		goto inval;
 
 	/* Check if the VMA got isolated after we found it */
-	if (vma->detached) {
+	if (is_vma_detached(vma)) {
 		vma_end_read(vma);
 		count_vm_vma_lock_event(VMA_LOCK_MISS);
 		/* The area was replaced with another one */
diff --git a/mm/mmap.c b/mm/mmap.c
index 386429f7db5a..1295c4cedaf4 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1570,6 +1570,7 @@ static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
 		goto mas_store_fail;
 
+	vma_mark_detached(vma, false);
 	mm->map_count++;
 	validate_mm(mm);
 	ksm_add_vma(vma);
@@ -1890,6 +1891,7 @@ static struct vm_area_struct *__install_special_mapping(
 	if (ret)
 		goto out;
 
+	vma_mark_detached(vma, false);
 	vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
 	perf_event_mmap(vma);
 
diff --git a/mm/nommu.c b/mm/nommu.c
index 9cb6e99215e2..6afd5c2bd97d 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1192,6 +1192,7 @@ unsigned long do_mmap(struct file *file,
 	current->mm->map_count++;
 	/* add the VMA to the tree */
 	vma_iter_store(&vmi, vma);
+	vma_mark_detached(vma, false);
 
 	/* we flush the region from the icache only when the first executable
 	 * mapping of it is made */
@@ -1357,6 +1358,7 @@ static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	setup_vma_to_mm(vma, mm);
 	setup_vma_to_mm(new, mm);
 	vma_iter_store(vmi, new);
+	vma_mark_detached(new, false);
 	mm->map_count++;
 
 	return 0;
diff --git a/mm/vma.c b/mm/vma.c
index 8a454a7bbc80..1426871fa6e0 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -275,6 +275,7 @@ static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
 		 * (it may either follow vma or precede it).
 		 */
 		vma_iter_store(vmi, vp->insert);
+		vma_mark_detached(vp->insert, false);
 		mm->map_count++;
 	}
 
@@ -1690,6 +1691,7 @@ int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
 
 	vma_start_write(vma);
 	vma_iter_store(&vmi, vma);
+	vma_mark_detached(vma, false);
 	vma_link_file(vma);
 	mm->map_count++;
 	validate_mm(mm);
@@ -2369,6 +2371,7 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
 
 	/* Lock the VMA since it is modified after insertion into VMA tree */
 	vma_start_write(vma);
 	vma_iter_store(vmi, vma);
+	vma_mark_detached(vma, false);
 	map->mm->map_count++;
 	vma_link_file(vma);
diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h
index 1d9fc97b8e80..fdb60978821f 100644
--- a/tools/testing/vma/vma_internal.h
+++ b/tools/testing/vma/vma_internal.h
@@ -438,7 +438,8 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
 	vma->vm_mm = mm;
 	vma->vm_ops = &vma_dummy_vm_ops;
 	INIT_LIST_HEAD(&vma->anon_vma_chain);
-	vma_mark_detached(vma, false);
+	/* vma is not locked, can't use vma_mark_detached() */
+	vma->detached = true;
 }
 
 static inline struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
--
2.47.0.277.g8800431eea-goog