Message-ID: <a2fca13d-87bd-4eb3-b673-46c538f46e66@suse.cz>
Date: Tue, 5 Aug 2025 16:20:47 +0200
From: Vlastimil Babka <vbabka@...e.cz>
To: Suren Baghdasaryan <surenb@...gle.com>, akpm@...ux-foundation.org
Cc: Liam.Howlett@...cle.com, lorenzo.stoakes@...cle.com, david@...hat.com,
peterx@...hat.com, jannh@...gle.com, hannes@...xchg.org, mhocko@...nel.org,
paulmck@...nel.org, shuah@...nel.org, adobriyan@...il.com,
brauner@...nel.org, josef@...icpanda.com, yebin10@...wei.com,
linux@...ssschuh.net, willy@...radead.org, osalvador@...e.de,
andrii@...nel.org, ryan.roberts@....com, christophe.leroy@...roup.eu,
tjmercier@...gle.com, kaleshsingh@...gle.com, aha310510@...il.com,
linux-kernel@...r.kernel.org, linux-fsdevel@...r.kernel.org,
linux-mm@...ck.org, linux-kselftest@...r.kernel.org
Subject: Re: [PATCH v2 3/3] fs/proc/task_mmu: execute PROCMAP_QUERY ioctl
under per-vma locks
On 8/5/25 1:15 AM, Suren Baghdasaryan wrote:
> Utilize per-vma locks to stabilize the vma after lookup without taking
> mmap_lock during PROCMAP_QUERY ioctl execution. If the vma lock is
> contended, we fall back to mmap_lock, but take it only momentarily
> to lock the vma and then release the mmap_lock. In the very unlikely
> case of vm_refcnt overflow, this fallback path fails and the ioctl is
> done under mmap_lock protection.
>
> This change is designed to reduce mmap_lock contention and prevent
> PROCMAP_QUERY ioctl calls from blocking address space updates.
>
> Signed-off-by: Suren Baghdasaryan <surenb@...gle.com>
> ---
> fs/proc/task_mmu.c | 81 +++++++++++++++++++++++++++++++++++++---------
> 1 file changed, 65 insertions(+), 16 deletions(-)
>
> diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
> index 843577aa7a32..1d06ecdbef6f 100644
> --- a/fs/proc/task_mmu.c
> +++ b/fs/proc/task_mmu.c
> @@ -517,28 +517,78 @@ static int pid_maps_open(struct inode *inode, struct file *file)
> PROCMAP_QUERY_VMA_FLAGS \
> )
>
> -static int query_vma_setup(struct mm_struct *mm)
> +#ifdef CONFIG_PER_VMA_LOCK
> +
> +static int query_vma_setup(struct proc_maps_locking_ctx *lock_ctx)
> {
> - return mmap_read_lock_killable(mm);
> + lock_ctx->locked_vma = NULL;
> + lock_ctx->mmap_locked = false;
> +
> + return 0;
> }
>
> -static void query_vma_teardown(struct mm_struct *mm, struct vm_area_struct *vma)
> +static void query_vma_teardown(struct proc_maps_locking_ctx *lock_ctx)
> {
> - mmap_read_unlock(mm);
> + if (lock_ctx->mmap_locked)
> + mmap_read_unlock(lock_ctx->mm);
> + else
> + unlock_vma(lock_ctx);
> }
>
> -static struct vm_area_struct *query_vma_find_by_addr(struct mm_struct *mm, unsigned long addr)
> +static struct vm_area_struct *query_vma_find_by_addr(struct proc_maps_locking_ctx *lock_ctx,
> + unsigned long addr)
> {
> - return find_vma(mm, addr);
> + struct vm_area_struct *vma;
> + struct vma_iterator vmi;
>
Hm, I think we can reach here with lock_ctx->mmap_locked being true via
"goto next_vma" in query_matching_vma(). In that case we should just
"return find_vma()"; doing the below is wrong, no? E.g. the vma would
get vma-locked while mmap_locked stays true, so teardown would only
mmap_read_unlock() and leave the vma locked, and on -EAGAIN we would
take mmap_read_lock() a second time while already holding it.
> + unlock_vma(lock_ctx);
> + rcu_read_lock();
> + vma_iter_init(&vmi, lock_ctx->mm, addr);
> + vma = lock_next_vma(lock_ctx->mm, &vmi, addr);
> + rcu_read_unlock();
> +
> + if (!IS_ERR_OR_NULL(vma)) {
> + lock_ctx->locked_vma = vma;
> + } else if (PTR_ERR(vma) == -EAGAIN) {
> + /* Fallback to mmap_lock on vma->vm_refcnt overflow */
> + mmap_read_lock(lock_ctx->mm);
> + vma = find_vma(lock_ctx->mm, addr);
> + lock_ctx->mmap_locked = true;
> + }
> +
> + return vma;
> }
>