Message-ID: <3t6mrzpv2ylqgkwrjvu6wjazrjxqupoz3wxsmbmawmlyodrwhs@vlxleb5wjsfv>
Date: Fri, 21 Feb 2025 11:11:51 -0500
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>
To: Suren Baghdasaryan <surenb@...gle.com>
Cc: akpm@...ux-foundation.org, peterz@...radead.org, willy@...radead.org,
lorenzo.stoakes@...cle.com, david.laight.linux@...il.com,
mhocko@...e.com, vbabka@...e.cz, hannes@...xchg.org, mjguzik@...il.com,
oliver.sang@...el.com, mgorman@...hsingularity.net, david@...hat.com,
peterx@...hat.com, oleg@...hat.com, dave@...olabs.net,
paulmck@...nel.org, brauner@...nel.org, dhowells@...hat.com,
hdanton@...a.com, hughd@...gle.com, lokeshgidra@...gle.com,
minchan@...gle.com, jannh@...gle.com, shakeel.butt@...ux.dev,
souravpanda@...gle.com, pasha.tatashin@...een.com,
klarasmodin@...il.com, richard.weiyang@...il.com, corbet@....net,
linux-doc@...r.kernel.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, kernel-team@...roid.com
Subject: Re: [PATCH v10 04/18] mm: introduce vma_iter_store_attached() to use
with attached vmas
* Suren Baghdasaryan <surenb@...gle.com> [250213 17:47]:
> vma_iter_store() can be used both when adding a new vma and when
> updating an existing one. However, for existing ones we do not need to
> mark them attached as they are already marked that way. With
> vma->detached being a separate flag, double-marking a vma as attached
> or detached is not an issue because the flag will simply be overwritten
> with the same value. However, once we fold this flag into the refcount
> later in this series, re-attaching or re-detaching a vma becomes an
> issue since these operations will be incrementing/decrementing a
> refcount.
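
To make the refcount hazard described above concrete, here is a minimal
userspace sketch (hypothetical fake_vma struct and mark_attached_*
helpers, not kernel code): overwriting a boolean flag twice is
idempotent, but a repeated attach over a refcount leaves the count off
by one, which is exactly the kind of misuse the new assertions are
meant to catch.

	#include <stdbool.h>
	#include <stdio.h>

	struct fake_vma {
		bool detached;	/* current scheme: plain flag */
		int refcnt;	/* future scheme: attachment folded into a count */
	};

	static void mark_attached_flag(struct fake_vma *v)
	{
		v->detached = false;	/* writing false twice is harmless */
	}

	static void mark_attached_refcnt(struct fake_vma *v)
	{
		v->refcnt++;		/* a second call over-counts */
	}

	int main(void)
	{
		struct fake_vma v = { .detached = true, .refcnt = 0 };

		mark_attached_flag(&v);
		mark_attached_flag(&v);		/* still simply "attached" */

		mark_attached_refcnt(&v);
		mark_attached_refcnt(&v);	/* refcnt == 2, only one attach is real */

		printf("detached=%d refcnt=%d\n", v.detached, v.refcnt);
		return 0;
	}
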
> Introduce vma_iter_store_new() and vma_iter_store_overwrite() to replace
> vma_iter_store() and avoid re-attaching a vma during vma update. Add
> assertions in vma_mark_attached()/vma_mark_detached() to catch invalid
> usage. Update vma tests to check for vma detached state correctness.
>
> Signed-off-by: Suren Baghdasaryan <surenb@...gle.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@...cle.com>
> ---
> Changes since v9 [1]:
> - Change VM_BUG_ON_VMA() to WARN_ON_ONCE() in vma_assert_{attached|detached},
> per Lorenzo Stoakes
> - Rename vma_iter_store() into vma_iter_store_new(), per Lorenzo Stoakes
> - Expand changelog, per Lorenzo Stoakes
> - Update vma tests to check for vma detached state correctness,
> per Lorenzo Stoakes
>
> [1] https://lore.kernel.org/all/20250111042604.3230628-5-surenb@google.com/
>
> include/linux/mm.h | 14 +++++++++++
> mm/nommu.c | 4 +--
> mm/vma.c | 12 ++++-----
> mm/vma.h | 11 +++++++--
> tools/testing/vma/vma.c | 42 +++++++++++++++++++++++++-------
> tools/testing/vma/vma_internal.h | 10 ++++++++
> 6 files changed, 74 insertions(+), 19 deletions(-)
>
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index cd5ee61e98f2..1b8e72888124 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -821,8 +821,19 @@ static inline void vma_assert_locked(struct vm_area_struct *vma)
> vma_assert_write_locked(vma);
> }
>
> +static inline void vma_assert_attached(struct vm_area_struct *vma)
> +{
> + WARN_ON_ONCE(vma->detached);
> +}
> +
> +static inline void vma_assert_detached(struct vm_area_struct *vma)
> +{
> + WARN_ON_ONCE(!vma->detached);
> +}
> +
> static inline void vma_mark_attached(struct vm_area_struct *vma)
> {
> + vma_assert_detached(vma);
> vma->detached = false;
> }
>
> @@ -830,6 +841,7 @@ static inline void vma_mark_detached(struct vm_area_struct *vma)
> {
> /* When detaching vma should be write-locked */
> vma_assert_write_locked(vma);
> + vma_assert_attached(vma);
> vma->detached = true;
> }
>
> @@ -866,6 +878,8 @@ static inline void vma_end_read(struct vm_area_struct *vma) {}
> static inline void vma_start_write(struct vm_area_struct *vma) {}
> static inline void vma_assert_write_locked(struct vm_area_struct *vma)
> { mmap_assert_write_locked(vma->vm_mm); }
> +static inline void vma_assert_attached(struct vm_area_struct *vma) {}
> +static inline void vma_assert_detached(struct vm_area_struct *vma) {}
> static inline void vma_mark_attached(struct vm_area_struct *vma) {}
> static inline void vma_mark_detached(struct vm_area_struct *vma) {}
>
> diff --git a/mm/nommu.c b/mm/nommu.c
> index baa79abdaf03..8b31d8396297 100644
> --- a/mm/nommu.c
> +++ b/mm/nommu.c
> @@ -1191,7 +1191,7 @@ unsigned long do_mmap(struct file *file,
> setup_vma_to_mm(vma, current->mm);
> current->mm->map_count++;
> /* add the VMA to the tree */
> - vma_iter_store(&vmi, vma);
> + vma_iter_store_new(&vmi, vma);
>
> /* we flush the region from the icache only when the first executable
> * mapping of it is made */
> @@ -1356,7 +1356,7 @@ static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
>
> setup_vma_to_mm(vma, mm);
> setup_vma_to_mm(new, mm);
> - vma_iter_store(vmi, new);
> + vma_iter_store_new(vmi, new);
> mm->map_count++;
> return 0;
>
> diff --git a/mm/vma.c b/mm/vma.c
> index 498507d8a262..f72b73f57451 100644
> --- a/mm/vma.c
> +++ b/mm/vma.c
> @@ -320,7 +320,7 @@ static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
> * us to insert it before dropping the locks
> * (it may either follow vma or precede it).
> */
> - vma_iter_store(vmi, vp->insert);
> + vma_iter_store_new(vmi, vp->insert);
> mm->map_count++;
> }
>
> @@ -700,7 +700,7 @@ static int commit_merge(struct vma_merge_struct *vmg)
> vmg->__adjust_middle_start ? vmg->middle : NULL);
> vma_set_range(vma, vmg->start, vmg->end, vmg->pgoff);
> vmg_adjust_set_range(vmg);
> - vma_iter_store(vmg->vmi, vmg->target);
> + vma_iter_store_overwrite(vmg->vmi, vmg->target);
>
> vma_complete(&vp, vmg->vmi, vma->vm_mm);
>
> @@ -1707,7 +1707,7 @@ int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
> return -ENOMEM;
>
> vma_start_write(vma);
> - vma_iter_store(&vmi, vma);
> + vma_iter_store_new(&vmi, vma);
> vma_link_file(vma);
> mm->map_count++;
> validate_mm(mm);
> @@ -2386,7 +2386,7 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
>
> /* Lock the VMA since it is modified after insertion into VMA tree */
> vma_start_write(vma);
> - vma_iter_store(vmi, vma);
> + vma_iter_store_new(vmi, vma);
> map->mm->map_count++;
> vma_link_file(vma);
>
> @@ -2862,7 +2862,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
> anon_vma_interval_tree_pre_update_vma(vma);
> vma->vm_end = address;
> /* Overwrite old entry in mtree. */
> - vma_iter_store(&vmi, vma);
> + vma_iter_store_overwrite(&vmi, vma);
> anon_vma_interval_tree_post_update_vma(vma);
>
> perf_event_mmap(vma);
> @@ -2942,7 +2942,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
> vma->vm_start = address;
> vma->vm_pgoff -= grow;
> /* Overwrite old entry in mtree. */
> - vma_iter_store(&vmi, vma);
> + vma_iter_store_overwrite(&vmi, vma);
> anon_vma_interval_tree_post_update_vma(vma);
>
> perf_event_mmap(vma);
> diff --git a/mm/vma.h b/mm/vma.h
> index bffb56afce5f..55be77ff042f 100644
> --- a/mm/vma.h
> +++ b/mm/vma.h
> @@ -413,9 +413,10 @@ static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
> }
>
> /* Store a VMA with preallocated memory */
> -static inline void vma_iter_store(struct vma_iterator *vmi,
> - struct vm_area_struct *vma)
> +static inline void vma_iter_store_overwrite(struct vma_iterator *vmi,
> + struct vm_area_struct *vma)
> {
> + vma_assert_attached(vma);
>
> #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
> if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
> @@ -438,7 +439,13 @@ static inline void vma_iter_store(struct vma_iterator *vmi,
>
> __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
> mas_store_prealloc(&vmi->mas, vma);
> +}
> +
> +static inline void vma_iter_store_new(struct vma_iterator *vmi,
> + struct vm_area_struct *vma)
> +{
> vma_mark_attached(vma);
> + vma_iter_store_overwrite(vmi, vma);
> }
>
> static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
> diff --git a/tools/testing/vma/vma.c b/tools/testing/vma/vma.c
> index c7ffa71841ca..11f761769b5b 100644
> --- a/tools/testing/vma/vma.c
> +++ b/tools/testing/vma/vma.c
> @@ -74,10 +74,22 @@ static struct vm_area_struct *alloc_vma(struct mm_struct *mm,
> ret->vm_end = end;
> ret->vm_pgoff = pgoff;
> ret->__vm_flags = flags;
> + vma_assert_detached(ret);
>
> return ret;
> }
>
> +/* Helper function to link a VMA to the tree and check it is attached. */
> +static int attach_vma(struct mm_struct *mm, struct vm_area_struct *vma)
> +{
> + int res;
> +
> + res = vma_link(mm, vma);
> + if (!res)
> + vma_assert_attached(vma);
> + return res;
> +}
> +
> /* Helper function to allocate a VMA and link it to the tree. */
> static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
> unsigned long start,
> @@ -90,7 +102,7 @@ static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
> if (vma == NULL)
> return NULL;
>
> - if (vma_link(mm, vma)) {
> + if (attach_vma(mm, vma)) {
> vm_area_free(vma);
> return NULL;
> }
> @@ -108,6 +120,7 @@ static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm,
> /* Helper function which provides a wrapper around a merge new VMA operation. */
> static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
> {
> + struct vm_area_struct *vma;
> /*
> * For convenience, get prev and next VMAs. Which the new VMA operation
> * requires.
> @@ -116,7 +129,11 @@ static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
> vmg->prev = vma_prev(vmg->vmi);
> vma_iter_next_range(vmg->vmi);
>
> - return vma_merge_new_range(vmg);
> + vma = vma_merge_new_range(vmg);
> + if (vma)
> + vma_assert_attached(vma);
> +
> + return vma;
> }
>
> /*
> @@ -125,7 +142,12 @@ static struct vm_area_struct *merge_new(struct vma_merge_struct *vmg)
> */
> static struct vm_area_struct *merge_existing(struct vma_merge_struct *vmg)
> {
> - return vma_merge_existing_range(vmg);
> + struct vm_area_struct *vma;
> +
> + vma = vma_merge_existing_range(vmg);
> + if (vma)
> + vma_assert_attached(vma);
> + return vma;
> }
>
> /*
> @@ -260,8 +282,8 @@ static bool test_simple_merge(void)
> .pgoff = 1,
> };
>
> - ASSERT_FALSE(vma_link(&mm, vma_left));
> - ASSERT_FALSE(vma_link(&mm, vma_right));
> + ASSERT_FALSE(attach_vma(&mm, vma_left));
> + ASSERT_FALSE(attach_vma(&mm, vma_right));
>
> vma = merge_new(&vmg);
> ASSERT_NE(vma, NULL);
> @@ -285,7 +307,7 @@ static bool test_simple_modify(void)
> struct vm_area_struct *init_vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
> VMA_ITERATOR(vmi, &mm, 0x1000);
>
> - ASSERT_FALSE(vma_link(&mm, init_vma));
> + ASSERT_FALSE(attach_vma(&mm, init_vma));
>
> /*
> * The flags will not be changed, the vma_modify_flags() function
> @@ -351,7 +373,7 @@ static bool test_simple_expand(void)
> .pgoff = 0,
> };
>
> - ASSERT_FALSE(vma_link(&mm, vma));
> + ASSERT_FALSE(attach_vma(&mm, vma));
>
> ASSERT_FALSE(expand_existing(&vmg));
>
> @@ -372,7 +394,7 @@ static bool test_simple_shrink(void)
> struct vm_area_struct *vma = alloc_vma(&mm, 0, 0x3000, 0, flags);
> VMA_ITERATOR(vmi, &mm, 0);
>
> - ASSERT_FALSE(vma_link(&mm, vma));
> + ASSERT_FALSE(attach_vma(&mm, vma));
>
> ASSERT_FALSE(vma_shrink(&vmi, vma, 0, 0x1000, 0));
>
> @@ -1522,11 +1544,11 @@ static bool test_copy_vma(void)
>
> vma = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
> vma_new = copy_vma(&vma, 0, 0x2000, 0, &need_locks);
> -
> ASSERT_NE(vma_new, vma);
> ASSERT_EQ(vma_new->vm_start, 0);
> ASSERT_EQ(vma_new->vm_end, 0x2000);
> ASSERT_EQ(vma_new->vm_pgoff, 0);
> + vma_assert_attached(vma_new);
>
> cleanup_mm(&mm, &vmi);
>
> @@ -1535,6 +1557,7 @@ static bool test_copy_vma(void)
> vma = alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
> vma_next = alloc_and_link_vma(&mm, 0x6000, 0x8000, 6, flags);
> vma_new = copy_vma(&vma, 0x4000, 0x2000, 4, &need_locks);
> + vma_assert_attached(vma_new);
>
> ASSERT_EQ(vma_new, vma_next);
>
> @@ -1576,6 +1599,7 @@ static bool test_expand_only_mode(void)
> ASSERT_EQ(vma->vm_pgoff, 3);
> ASSERT_TRUE(vma_write_started(vma));
> ASSERT_EQ(vma_iter_addr(&vmi), 0x3000);
> + vma_assert_attached(vma);
>
> cleanup_mm(&mm, &vmi);
> return true;
> diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h
> index f93f7f74f97b..34277842156c 100644
> --- a/tools/testing/vma/vma_internal.h
> +++ b/tools/testing/vma/vma_internal.h
> @@ -470,6 +470,16 @@ static inline void vma_lock_init(struct vm_area_struct *vma)
> vma->vm_lock_seq = UINT_MAX;
> }
>
> +static inline void vma_assert_attached(struct vm_area_struct *vma)
> +{
> + WARN_ON_ONCE(vma->detached);
> +}
> +
> +static inline void vma_assert_detached(struct vm_area_struct *vma)
> +{
> + WARN_ON_ONCE(!vma->detached);
> +}
> +
> static inline void vma_assert_write_locked(struct vm_area_struct *);
> static inline void vma_mark_attached(struct vm_area_struct *vma)
> {
> --
> 2.48.1.601.g30ceb7b040-goog
>