Message-ID: <3febb57e-e805-4cb9-8929-e47b957192a6@suse.cz>
Date: Tue, 29 Oct 2024 17:48:41 +0100
From: Vlastimil Babka <vbabka@...e.cz>
To: Andrii Nakryiko <andrii@...nel.org>, linux-trace-kernel@...r.kernel.org,
linux-mm@...ck.org, akpm@...ux-foundation.org, peterz@...radead.org
Cc: oleg@...hat.com, rostedt@...dmis.org, mhiramat@...nel.org,
bpf@...r.kernel.org, linux-kernel@...r.kernel.org, jolsa@...nel.org,
paulmck@...nel.org, willy@...radead.org, surenb@...gle.com,
mjguzik@...il.com, brauner@...nel.org, jannh@...gle.com, mhocko@...nel.org,
shakeel.butt@...ux.dev, hannes@...xchg.org, Liam.Howlett@...cle.com,
lorenzo.stoakes@...cle.com, david@...hat.com, arnd@...db.de,
richard.weiyang@...il.com, zhangpeng.00@...edance.com, linmiaohe@...wei.com,
viro@...iv.linux.org.uk, hca@...ux.ibm.com
Subject: Re: [PATCH v4 tip/perf/core 2/4] mm: Introduce mmap_lock_speculation_{begin|end}

On 10/28/24 02:08, Andrii Nakryiko wrote:
> From: Suren Baghdasaryan <surenb@...gle.com>
>
> Add helper functions to speculatively perform operations without
> read-locking mmap_lock, expecting that mmap_lock will not be
> write-locked and mm is not modified from under us.
>
> Suggested-by: Peter Zijlstra <peterz@...radead.org>
> Signed-off-by: Suren Baghdasaryan <surenb@...gle.com>
> Signed-off-by: Andrii Nakryiko <andrii@...nel.org>
Acked-by: Vlastimil Babka <vbabka@...e.cz>
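
FWIW, the intended caller pattern as I read it (my sketch, not part of
the patch; the reads inside the speculative section are placeholders):

	unsigned int seq;
	bool valid = false;

	if (mmap_lock_speculation_begin(mm, &seq)) {
		/* lock-free, speculative reads of mm state go here */
		valid = mmap_lock_speculation_end(mm, seq);
	}
	if (!valid) {
		/* a writer was (or became) active: fall back to the lock */
		mmap_read_lock(mm);
		/* ... redo the reads under the lock ... */
		mmap_read_unlock(mm);
	}
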
> ---
> include/linux/mmap_lock.h | 29 +++++++++++++++++++++++++++--
> 1 file changed, 27 insertions(+), 2 deletions(-)
>
> diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
> index 6b3272686860..58dde2e35f7e 100644
> --- a/include/linux/mmap_lock.h
> +++ b/include/linux/mmap_lock.h
> @@ -71,6 +71,7 @@ static inline void mmap_assert_write_locked(const struct mm_struct *mm)
> }
>
> #ifdef CONFIG_PER_VMA_LOCK
> +
> static inline void mm_lock_seqcount_init(struct mm_struct *mm)
> {
> seqcount_init(&mm->mm_lock_seq);
> @@ -86,11 +87,35 @@ static inline void mm_lock_seqcount_end(struct mm_struct *mm)
> do_raw_write_seqcount_end(&mm->mm_lock_seq);
> }
>
> -#else
> +static inline bool mmap_lock_speculation_begin(struct mm_struct *mm, unsigned int *seq)
> +{
> + *seq = raw_read_seqcount(&mm->mm_lock_seq);
> + /* Allow speculation if mmap_lock is not write-locked */
> + return (*seq & 1) == 0;
> +}
> +
> +static inline bool mmap_lock_speculation_end(struct mm_struct *mm, unsigned int seq)
> +{
> + return !do_read_seqcount_retry(&mm->mm_lock_seq, seq);
> +}
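
A note for readers: the "& 1" check relies on the usual seqcount
convention: the write side (mm_lock_seqcount_begin()/end(), hooked into
the mmap_write_lock paths earlier in the series) keeps the count odd for
as long as the write lock is held. Schematically (my sketch, assuming
that pairing):

	mmap_write_lock(mm);	/* mm_lock_seqcount_begin(): count becomes odd,
				   so mmap_lock_speculation_begin() fails */
	/* ... modify mm ... */
	mmap_write_unlock(mm);	/* mm_lock_seqcount_end(): count even again but
				   one cycle ahead, so do_read_seqcount_retry()
				   in mmap_lock_speculation_end() catches a
				   racing speculative section */
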
> +
> +#else /* CONFIG_PER_VMA_LOCK */
> +
> static inline void mm_lock_seqcount_init(struct mm_struct *mm) {}
> static inline void mm_lock_seqcount_begin(struct mm_struct *mm) {}
> static inline void mm_lock_seqcount_end(struct mm_struct *mm) {}
> -#endif
> +
> +static inline bool mmap_lock_speculation_begin(struct mm_struct *mm, unsigned int *seq)
> +{
> + return false;
> +}
> +
> +static inline bool mmap_lock_speculation_end(struct mm_struct *mm, unsigned int seq)
> +{
> + return false;
> +}
> +
> +#endif /* CONFIG_PER_VMA_LOCK */
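
(And with !CONFIG_PER_VMA_LOCK both stubs return false, so a caller
written against these helpers needs no #ifdefs and simply always takes
its locked fallback path.)
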
>
> static inline void mmap_init_lock(struct mm_struct *mm)
> {