Message-ID: <994f1ee6-4ab6-431f-a931-61e290afd89f@suse.cz>
Date: Wed, 8 Jan 2025 19:05:52 +0100
From: Vlastimil Babka <vbabka@...e.cz>
To: Suren Baghdasaryan <surenb@...gle.com>, akpm@...ux-foundation.org
Cc: peterz@...radead.org, willy@...radead.org, liam.howlett@...cle.com,
lorenzo.stoakes@...cle.com, mhocko@...e.com, hannes@...xchg.org,
mjguzik@...il.com, oliver.sang@...el.com, mgorman@...hsingularity.net,
david@...hat.com, peterx@...hat.com, oleg@...hat.com, dave@...olabs.net,
paulmck@...nel.org, brauner@...nel.org, dhowells@...hat.com,
hdanton@...a.com, hughd@...gle.com, lokeshgidra@...gle.com,
minchan@...gle.com, jannh@...gle.com, shakeel.butt@...ux.dev,
souravpanda@...gle.com, pasha.tatashin@...een.com, klarasmodin@...il.com,
corbet@....net, linux-doc@...r.kernel.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, kernel-team@...roid.com
Subject: Re: [PATCH v7 15/17] mm: prepare lock_vma_under_rcu() for vma reuse
possibility
On 12/26/24 18:07, Suren Baghdasaryan wrote:
> Once we make the vma cache SLAB_TYPESAFE_BY_RCU, it will be possible for a
> vma to be reused and attached to another mm after lock_vma_under_rcu()
> locks the vma. lock_vma_under_rcu() should therefore ensure that
> vma_start_read() uses the original mm, and after locking the vma it should
> check that vma->vm_mm has not changed from under us.
>
> Signed-off-by: Suren Baghdasaryan <surenb@...gle.com>
Reviewed-by: Vlastimil Babka <vbabka@...e.cz>
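
For anyone new to SLAB_TYPESAFE_BY_RCU: the flag only guarantees that the
memory keeps its type across free/reuse within an RCU grace period, so a
reader has to lock (or take a reference on) the object first and then
re-validate its identity. A minimal sketch of that generic pattern (the
foo/lookup()/key names are made up for illustration):

	rcu_read_lock();
	foo = lookup(key);			/* may race with free + reuse */
	if (foo && !foo_try_lock(foo))
		foo = NULL;
	if (foo && foo->key != key) {		/* recycled for another key */
		foo_unlock(foo);
		foo = NULL;
	}
	rcu_read_unlock();

The vma->vm_mm re-check added below is exactly this identity re-validation
step, with mm playing the role of the key.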
> ---
> include/linux/mm.h | 10 ++++++----
> mm/memory.c | 7 ++++---
> 2 files changed, 10 insertions(+), 7 deletions(-)
>
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 40bbe815df11..56a7d70ca5bd 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -730,8 +730,10 @@ static inline void vma_refcount_put(struct vm_area_struct *vma)
> * Try to read-lock a vma. The function is allowed to occasionally yield false
> * locked result to avoid performance overhead, in which case we fall back to
> * using mmap_lock. The function should never yield false unlocked result.
> + * False locked result is possible if mm_lock_seq overflows or if vma gets
> + * reused and attached to a different mm before we lock it.
> */
> -static inline bool vma_start_read(struct vm_area_struct *vma)
> +static inline bool vma_start_read(struct mm_struct *mm, struct vm_area_struct *vma)
> {
> int oldcnt;
>
> @@ -742,7 +744,7 @@ static inline bool vma_start_read(struct vm_area_struct *vma)
> * we don't rely on for anything - the mm_lock_seq read against which we
> * need ordering is below.
> */
> - if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq.sequence))
> + if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(mm->mm_lock_seq.sequence))
> return false;
>
>
> @@ -767,7 +769,7 @@ static inline bool vma_start_read(struct vm_area_struct *vma)
> * This pairs with RELEASE semantics in vma_end_write_all().
> */
> if (unlikely(oldcnt & VMA_LOCK_OFFSET ||
> - vma->vm_lock_seq == raw_read_seqcount(&vma->vm_mm->mm_lock_seq))) {
> + vma->vm_lock_seq == raw_read_seqcount(&mm->mm_lock_seq))) {
> vma_refcount_put(vma);
> return false;
> }
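
The "fall back to mmap_lock" contract from the comment above, seen from a
caller's perspective, looks roughly like the arch page fault handlers'
pattern (simplified sketch, error handling omitted):

	vma = lock_vma_under_rcu(mm, address);
	if (vma) {
		/* fast path: per-vma read lock held, mm already verified */
		fault = handle_mm_fault(vma, address,
					flags | FAULT_FLAG_VMA_LOCK, regs);
		if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
			vma_end_read(vma);
	} else {
		/* slow path: a false locked result from vma_start_read()
		 * ends up here */
		mmap_read_lock(mm);
		vma = find_vma(mm, address);
		/* ... regular mmap_lock-protected fault handling ... */
	}

so a spurious failure from vma_start_read() only costs a trip through the
slow path, never correctness.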
> @@ -905,7 +907,7 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
>
> static inline void vma_lockdep_init(struct vm_area_struct *vma) {}
> static inline void vma_init_lock(struct vm_area_struct *vma, bool reset_refcnt) {}
> -static inline bool vma_start_read(struct vm_area_struct *vma)
> +static inline bool vma_start_read(struct mm_struct *mm, struct vm_area_struct *vma)
> { return false; }
> static inline void vma_end_read(struct vm_area_struct *vma) {}
> static inline void vma_start_write(struct vm_area_struct *vma) {}
> diff --git a/mm/memory.c b/mm/memory.c
> index 2def47b5dff0..9cc93c2f79f3 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -6414,7 +6414,7 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
> if (!vma)
> goto inval;
>
> - if (!vma_start_read(vma))
> + if (!vma_start_read(mm, vma))
> goto inval;
>
> /*
> @@ -6424,8 +6424,9 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
> * fields are accessible for RCU readers.
> */
>
> - /* Check since vm_start/vm_end might change before we lock the VMA */
> - if (unlikely(address < vma->vm_start || address >= vma->vm_end))
> + /* Check if the vma we locked is the right one. */
> + if (unlikely(vma->vm_mm != mm ||
> + address < vma->vm_start || address >= vma->vm_end))
> goto inval_end_read;
>
> rcu_read_unlock();
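
To make the race the new check closes concrete, an interleaving along these
lines (illustrative timeline, not taken from a real trace):

	CPU 0 (lock_vma_under_rcu)	CPU 1
	--------------------------	-----
	rcu_read_lock();
	vma = mas_walk(&mas);
					frees the vma; SLAB_TYPESAFE_BY_RCU
					keeps the memory valid, and it is
					reused for a vma in another mm
	vma_start_read(mm, vma);
	  /* may succeed against the new owner */
	vma->vm_mm != mm		/* re-check catches the reuse */
	  -> goto inval_end_read;

Without the vm_mm comparison, the vm_start/vm_end checks alone could pass by
coincidence even though the vma now belongs to a different address space.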