Message-ID: <20241216213753.GD9803@noisy.programming.kicks-ass.net>
Date: Mon, 16 Dec 2024 22:37:53 +0100
From: Peter Zijlstra <peterz@...radead.org>
To: Suren Baghdasaryan <surenb@...gle.com>
Cc: akpm@...ux-foundation.org, willy@...radead.org, liam.howlett@...cle.com,
lorenzo.stoakes@...cle.com, mhocko@...e.com, vbabka@...e.cz,
hannes@...xchg.org, mjguzik@...il.com, oliver.sang@...el.com,
mgorman@...hsingularity.net, david@...hat.com, peterx@...hat.com,
oleg@...hat.com, dave@...olabs.net, paulmck@...nel.org,
brauner@...nel.org, dhowells@...hat.com, hdanton@...a.com,
hughd@...gle.com, lokeshgidra@...gle.com, minchan@...gle.com,
jannh@...gle.com, shakeel.butt@...ux.dev, souravpanda@...gle.com,
pasha.tatashin@...een.com, klarasmodin@...il.com, corbet@....net,
linux-doc@...r.kernel.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, kernel-team@...roid.com
Subject: Re: [PATCH v6 10/16] mm: replace vm_lock and detached flag with a
reference count
On Mon, Dec 16, 2024 at 11:24:13AM -0800, Suren Baghdasaryan wrote:
> +static inline void vma_refcount_put(struct vm_area_struct *vma)
> +{
> +	int refcnt;
> +
> +	if (!__refcount_dec_and_test(&vma->vm_refcnt, &refcnt)) {
> +		rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
> +
> +		if (refcnt & VMA_STATE_LOCKED)
> +			rcuwait_wake_up(&vma->vm_mm->vma_writer_wait);
> +	}
> +}
> +
>  /*
>   * Try to read-lock a vma. The function is allowed to occasionally yield false
>   * locked result to avoid performance overhead, in which case we fall back to
> @@ -710,6 +728,8 @@ static inline void vma_lock_init(struct vm_area_struct *vma)
>   */
>  static inline bool vma_start_read(struct vm_area_struct *vma)
>  {
> +	int oldcnt;
> +
>  	/*
>  	 * Check before locking. A race might cause false locked result.
>  	 * We can use READ_ONCE() for the mm_lock_seq here, and don't need
> @@ -720,13 +740,20 @@ static inline bool vma_start_read(struct vm_area_struct *vma)
>  	if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq.sequence))
>  		return false;
> 
> +
> +	rwsem_acquire_read(&vma->vmlock_dep_map, 0, 0, _RET_IP_);
> +	/* Limit at VMA_STATE_LOCKED - 2 to leave one count for a writer */
> +	if (unlikely(!__refcount_inc_not_zero_limited(&vma->vm_refcnt, &oldcnt,
> +						      VMA_STATE_LOCKED - 2))) {
> +		rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
>  		return false;
> +	}
> +	lock_acquired(&vma->vmlock_dep_map, _RET_IP_);
> 
>  	/*
> +	 * Overflow of vm_lock_seq/mm_lock_seq might produce false locked result.
>  	 * False unlocked result is impossible because we modify and check
> +	 * vma->vm_lock_seq under vma->vm_refcnt protection and mm->mm_lock_seq
>  	 * modification invalidates all existing locks.
>  	 *
>  	 * We must use ACQUIRE semantics for the mm_lock_seq so that if we are
> @@ -734,10 +761,12 @@ static inline bool vma_start_read(struct vm_area_struct *vma)
>  	 * after it has been unlocked.
>  	 * This pairs with RELEASE semantics in vma_end_write_all().
>  	 */
> +	if (oldcnt & VMA_STATE_LOCKED ||
> +	    unlikely(vma->vm_lock_seq == raw_read_seqcount(&vma->vm_mm->mm_lock_seq))) {
> +		vma_refcount_put(vma);

Suppose we have a detach race with a concurrent RCU lookup like:

	vma = mas_lookup();

						vma_start_write();
						mas_detach();
	vma_start_read()
	  rwsem_acquire_read()
	  inc // success
						vma_mark_detached();
						  dec_and_test // assumes 1->0
							       // is actually 2->1

	if (vm_lock_seq == vma->vm_mm->mm_lock_seq) // true
	  vma_refcount_put
	    dec_and_test() // 1->0
	    *NO* rwsem_release()
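
A minimal userspace sketch of that interleaving (plain C11 atomics stand in
for the kernel refcount API; the helper names here are illustrative, not the
kernel ones) ends with the reader, not the writer, observing the final 1->0
transition:

	/* Models: attached VMA, reader takes a ref, writer detaches, reader puts. */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_int vm_refcnt = 1;	/* attached VMA */

	static bool inc_not_zero(atomic_int *r)
	{
		int old = atomic_load(r);

		while (old != 0)
			if (atomic_compare_exchange_weak(r, &old, old + 1))
				return true;
		return false;
	}

	static bool dec_and_test(atomic_int *r)
	{
		return atomic_fetch_sub(r, 1) == 1;	/* true on the 1->0 transition */
	}

	int main(void)
	{
		bool reader_ref  = inc_not_zero(&vm_refcnt);	/* vma_start_read(): 1->2 */
		bool writer_last = dec_and_test(&vm_refcnt);	/* vma_mark_detached(): 2->1, not the assumed 1->0 */
		bool reader_last = dec_and_test(&vm_refcnt);	/* vma_refcount_put(): 1->0 */

		printf("reader_ref=%d writer_last=%d reader_last=%d\n",
		       reader_ref, writer_last, reader_last);
		return 0;
	}

Since the reader's dec_and_test() returns true there, the
!__refcount_dec_and_test() branch in vma_refcount_put() is not taken and the
rwsem_release() for the dep_map acquired in vma_start_read() never happens.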
>  		return false;
>  	}
> +
>  	return true;
>  }