Message-ID: <1423c375-dd2f-4ba4-b2e3-97b460d6c9e6@lucifer.local>
Date: Mon, 13 Jan 2025 15:52:35 +0000
From: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
To: Suren Baghdasaryan <surenb@...gle.com>
Cc: akpm@...ux-foundation.org, peterz@...radead.org, willy@...radead.org,
liam.howlett@...cle.com, david.laight.linux@...il.com, mhocko@...e.com,
vbabka@...e.cz, hannes@...xchg.org, mjguzik@...il.com,
oliver.sang@...el.com, mgorman@...hsingularity.net, david@...hat.com,
peterx@...hat.com, oleg@...hat.com, dave@...olabs.net,
paulmck@...nel.org, brauner@...nel.org, dhowells@...hat.com,
hdanton@...a.com, hughd@...gle.com, lokeshgidra@...gle.com,
minchan@...gle.com, jannh@...gle.com, shakeel.butt@...ux.dev,
souravpanda@...gle.com, pasha.tatashin@...een.com,
klarasmodin@...il.com, richard.weiyang@...il.com, corbet@....net,
linux-doc@...r.kernel.org, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, kernel-team@...roid.com
Subject: Re: [PATCH v9 09/17] mm: uninline the main body of vma_start_write()
On Fri, Jan 10, 2025 at 08:25:56PM -0800, Suren Baghdasaryan wrote:
> vma_start_write() is used in many places and will soon grow in size.
> It is not used on performance-critical paths, so uninlining it should
> limit future code-size growth.
> No functional changes.
>
> Signed-off-by: Suren Baghdasaryan <surenb@...gle.com>
> Reviewed-by: Vlastimil Babka <vbabka@...e.cz>
LGTM,
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
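
One aside for readers following the WRITE_ONCE() comment in the moved
code: it pairs with the lockless peek at the top of vma_start_read().
Roughly (a simplified sketch, exact field accesses vary by tree):

	/*
	 * Reader-side fast path in vma_start_read(): peek at
	 * vm_lock_seq without holding vma->vm_lock.lock. A stale value
	 * only causes a pessimistic bail-out to the mmap_lock path, so
	 * correctness doesn't depend on it, but the load still races
	 * with the writer's store, hence the READ_ONCE()/WRITE_ONCE()
	 * pairing that keeps the accesses data-race-free for KCSAN.
	 */
	if (READ_ONCE(vma->vm_lock_seq) == mm_lock_seq)
		return false;	/* assume already write-locked */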
> ---
> include/linux/mm.h | 12 +++---------
> mm/memory.c | 14 ++++++++++++++
> 2 files changed, 17 insertions(+), 9 deletions(-)
>
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index cbb4e3dbbaed..3432756d95e6 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -787,6 +787,8 @@ static bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_l
> return (vma->vm_lock_seq == *mm_lock_seq);
> }
>
> +void __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq);
> +
> /*
> * Begin writing to a VMA.
> * Exclude concurrent readers under the per-VMA lock until the currently
> @@ -799,15 +801,7 @@ static inline void vma_start_write(struct vm_area_struct *vma)
> if (__is_vma_write_locked(vma, &mm_lock_seq))
> return;
>
> - down_write(&vma->vm_lock.lock);
> - /*
> - * We should use WRITE_ONCE() here because we can have concurrent reads
> - * from the early lockless pessimistic check in vma_start_read().
> - * We don't really care about the correctness of that early check, but
> - * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy.
> - */
> - WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq);
> - up_write(&vma->vm_lock.lock);
> + __vma_start_write(vma, mm_lock_seq);
> }
>
> static inline void vma_assert_write_locked(struct vm_area_struct *vma)
> diff --git a/mm/memory.c b/mm/memory.c
> index d0dee2282325..236fdecd44d6 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -6328,6 +6328,20 @@ struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
> #endif
>
> #ifdef CONFIG_PER_VMA_LOCK
> +void __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq)
> +{
> + down_write(&vma->vm_lock.lock);
> + /*
> + * We should use WRITE_ONCE() here because we can have concurrent reads
> + * from the early lockless pessimistic check in vma_start_read().
> + * We don't really care about the correctness of that early check, but
> + * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy.
> + */
> + WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq);
> + up_write(&vma->vm_lock.lock);
> +}
> +EXPORT_SYMBOL_GPL(__vma_start_write);
> +
> /*
> * Lookup and lock a VMA under RCU protection. Returned VMA is guaranteed to be
> * stable and not isolated. If the VMA is not found or is being modified the
> --
> 2.47.1.613.gc27f4b7a9f-goog
>
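For what it's worth, the shape of the change is the classic
inline-fast-path / out-of-line-slow-path split: each of the many call
sites keeps only the cheap sequence check inline and emits a single
call for the rare slow path. In generic form (hypothetical names, not
the kernel's API, just to illustrate the pattern):

	/* Hypothetical sketch of the split, not kernel code. */
	static inline void obj_start_write(struct obj *obj)
	{
		unsigned int seq = current_lock_seq(obj);

		if (obj->lock_seq == seq)	/* already locked: stays inline */
			return;
		__obj_start_write(obj, seq);	/* heavy part, one copy in .text */
	}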