Message-ID: <4f2fa8d1-0f80-476c-9391-cbec67ec1ada@lucifer.local>
Date: Fri, 1 Aug 2025 12:00:20 +0100
From: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
To: Suren Baghdasaryan <surenb@...gle.com>
Cc: akpm@...ux-foundation.org, jannh@...gle.com, Liam.Howlett@...cle.com,
vbabka@...e.cz, pfalcato@...e.de, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2 2/2] mm: change vma_start_read() to drop RCU lock on
failure
On Thu, Jul 31, 2025 at 08:19:19AM -0700, Suren Baghdasaryan wrote:
> vma_start_read() can drop and reacquire the RCU lock in certain failure
> cases. It is not apparent to the caller of this function that its RCU
> session might be interrupted when vma_start_read() fails to lock the
> vma. This could become a source of subtle bugs, so to prevent that we
> change the locking rules for vma_start_read() to drop the RCU read lock
> upon failure. This way it is more obvious that RCU-protected objects are
> unsafe after vma locking fails.
>
> Suggested-by: Vlastimil Babka <vbabka@...e.cz>
> Signed-off-by: Suren Baghdasaryan <surenb@...gle.com>
Thanks for iterating on this so quickly - I think this is a good cleanup, so
it's well worth having.
The change LGTM, so:
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
I also confirmed this fixes the issue locally and ran the mm self-tests, so
feel free to also add:
Tested-by: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
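For anybody skimming the thread later, the caller-side contract this patch
establishes looks roughly like the below. This is an illustrative sketch only
(lock_one_vma_sketch() is a made-up name), not the actual lock_vma_under_rcu()
code:

	static struct vm_area_struct *lock_one_vma_sketch(struct mm_struct *mm,
							  struct vm_area_struct *vma)
	{
		rcu_read_lock();
		vma = vma_start_read(mm, vma);
		if (IS_ERR_OR_NULL(vma)) {
			/*
			 * On failure vma_start_read() has already dropped the
			 * RCU read lock, so do not unlock again and do not
			 * touch the vma or any other RCU-protected object here.
			 */
			return NULL;
		}
		/* Success: still inside the RCU read section, vma is read-locked. */
		rcu_read_unlock();
		return vma;
	}

The asymmetry (lock taken by the caller, released by the callee on error) is
exactly what the new comment spells out.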
> ---
> Changes since v1 [1]:
> - Fixed missing RCU unlock in lock_vma_under_rcu(), per Lorenzo Stoakes
> - Modified comments, per Lorenzo Stoakes
>
> [1] https://lore.kernel.org/all/20250731013405.4066346-2-surenb@google.com/
>
> mm/mmap_lock.c | 86 +++++++++++++++++++++++++++-----------------------
> 1 file changed, 47 insertions(+), 39 deletions(-)
>
> diff --git a/mm/mmap_lock.c b/mm/mmap_lock.c
> index 10826f347a9f..7ea603f26975 100644
> --- a/mm/mmap_lock.c
> +++ b/mm/mmap_lock.c
> @@ -136,15 +136,16 @@ void vma_mark_detached(struct vm_area_struct *vma)
> * Returns the vma on success, NULL on failure to lock and EAGAIN if vma got
> * detached.
> *
> - * WARNING! The vma passed to this function cannot be used if the function
> - * fails to lock it because in certain cases RCU lock is dropped and then
> - * reacquired. Once RCU lock is dropped the vma can be concurently freed.
> + * IMPORTANT: RCU lock must be held upon entering the function, but upon error
> + * IT IS RELEASED. The caller must handle this correctly.
> */
Actually, maybe this is better than the original wording, as it is super
clear and direct.
> static inline struct vm_area_struct *vma_start_read(struct mm_struct *mm,
> struct vm_area_struct *vma)
> {
> + struct mm_struct *other_mm;
> int oldcnt;
>
> + RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");
> /*
> * Check before locking. A race might cause false locked result.
> * We can use READ_ONCE() for the mm_lock_seq here, and don't need
> @@ -152,8 +153,10 @@ static inline struct vm_area_struct *vma_start_read(struct mm_struct *mm,
> * we don't rely on for anything - the mm_lock_seq read against which we
> * need ordering is below.
> */
> - if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(mm->mm_lock_seq.sequence))
> - return NULL;
> + if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(mm->mm_lock_seq.sequence)) {
> + vma = NULL;
> + goto err;
> + }
>
> /*
> * If VMA_LOCK_OFFSET is set, __refcount_inc_not_zero_limited_acquire()
> @@ -164,34 +167,14 @@ static inline struct vm_area_struct *vma_start_read(struct mm_struct *mm,
> if (unlikely(!__refcount_inc_not_zero_limited_acquire(&vma->vm_refcnt, &oldcnt,
> VMA_REF_LIMIT))) {
> /* return EAGAIN if vma got detached from under us */
> - return oldcnt ? NULL : ERR_PTR(-EAGAIN);
> + vma = oldcnt ? NULL : ERR_PTR(-EAGAIN);
> + goto err;
> }
>
> rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_);
>
> - /*
> - * If vma got attached to another mm from under us, that mm is not
> - * stable and can be freed in the narrow window after vma->vm_refcnt
> - * is dropped and before rcuwait_wake_up(mm) is called. Grab it before
> - * releasing vma->vm_refcnt.
> - */
> - if (unlikely(vma->vm_mm != mm)) {
> - /* Use a copy of vm_mm in case vma is freed after we drop vm_refcnt */
> - struct mm_struct *other_mm = vma->vm_mm;
> -
> - /*
> - * __mmdrop() is a heavy operation and we don't need RCU
> - * protection here. Release RCU lock during these operations.
> - * We reinstate the RCU read lock as the caller expects it to
> - * be held when this function returns even on error.
> - */
> - rcu_read_unlock();
> - mmgrab(other_mm);
> - vma_refcount_put(vma);
> - mmdrop(other_mm);
> - rcu_read_lock();
> - return NULL;
> - }
> + if (unlikely(vma->vm_mm != mm))
> + goto err_unstable;
>
> /*
> * Overflow of vm_lock_seq/mm_lock_seq might produce false locked result.
> @@ -206,10 +189,31 @@ static inline struct vm_area_struct *vma_start_read(struct mm_struct *mm,
> */
> if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&mm->mm_lock_seq))) {
> vma_refcount_put(vma);
> - return NULL;
> + vma = NULL;
> + goto err;
> }
>
> return vma;
> +err:
> + rcu_read_unlock();
> +
> + return vma;
> +err_unstable:
> + /*
> + * If vma got attached to another mm from under us, that mm is not
> + * stable and can be freed in the narrow window after vma->vm_refcnt
> + * is dropped and before rcuwait_wake_up(mm) is called. Grab it before
> + * releasing vma->vm_refcnt.
> + */
> + other_mm = vma->vm_mm; /* use a copy as vma can be freed after we drop vm_refcnt */
> +
> + /* __mmdrop() is a heavy operation, do it after dropping RCU lock. */
> + rcu_read_unlock();
> + mmgrab(other_mm);
> + vma_refcount_put(vma);
> + mmdrop(other_mm);
> +
> + return NULL;
> }
>
> /*
> @@ -223,11 +227,13 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
> MA_STATE(mas, &mm->mm_mt, address, address);
> struct vm_area_struct *vma;
>
> - rcu_read_lock();
> retry:
> + rcu_read_lock();
> vma = mas_walk(&mas);
> - if (!vma)
> + if (!vma) {
> + rcu_read_unlock();
Good :>)
Again, very easy thing to miss.
> goto inval;
> + }
>
> vma = vma_start_read(mm, vma);
> if (IS_ERR_OR_NULL(vma)) {
> @@ -241,6 +247,9 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
> /* Failed to lock the VMA */
> goto inval;
> }
> +
> + rcu_read_unlock();
> +
> /*
> * At this point, we have a stable reference to a VMA: The VMA is
> * locked and we know it hasn't already been isolated.
> @@ -249,16 +258,14 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
> */
>
> /* Check if the vma we locked is the right one. */
> - if (unlikely(address < vma->vm_start || address >= vma->vm_end))
> - goto inval_end_read;
> + if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
> + vma_end_read(vma);
> + goto inval;
> + }
>
> - rcu_read_unlock();
> return vma;
>
> -inval_end_read:
> - vma_end_read(vma);
> inval:
> - rcu_read_unlock();
> count_vm_vma_lock_event(VMA_LOCK_ABORT);
> return NULL;
> }
> @@ -313,6 +320,7 @@ struct vm_area_struct *lock_next_vma(struct mm_struct *mm,
> */
> if (PTR_ERR(vma) == -EAGAIN) {
> /* reset to search from the last address */
> + rcu_read_lock();
> vma_iter_set(vmi, from_addr);
> goto retry;
> }
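Also worth spelling out: under the new rule, any path that retries after a
failed vma_start_read() has to re-enter the RCU read section itself. A rough
paraphrase of the flow this hunk establishes (an illustrative sketch, not a
literal copy of lock_next_vma()):

	vma = vma_start_read(mm, vma);
	if (IS_ERR_OR_NULL(vma)) {
		/* vma_start_read() already dropped the RCU read lock here */
		if (PTR_ERR(vma) == -EAGAIN) {
			/* vma got detached: re-lock RCU and retry the walk */
			rcu_read_lock();
			vma_iter_set(vmi, from_addr);
			goto retry;
		}
		/* otherwise fall back to the mmap_lock path, RCU unlocked */
		goto fallback;
	}

and the fallback path then re-takes the RCU lock itself after
lock_next_vma_under_mmap_lock(), as the next hunk shows.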
> @@ -342,9 +350,9 @@ struct vm_area_struct *lock_next_vma(struct mm_struct *mm,
> return vma;
>
> fallback_unlock:
> + rcu_read_unlock();
> vma_end_read(vma);
> fallback:
> - rcu_read_unlock();
> vma = lock_next_vma_under_mmap_lock(mm, vmi, from_addr);
> rcu_read_lock();
> /* Reinitialize the iterator after re-entering rcu read section */
> --
> 2.50.1.552.g942d659e1b-goog
>