Message-ID: <CAJuCfpGgoSYmGSdcf+fZF1mUeNo-M=fzfk7G6ATs5-0TT+zkfQ@mail.gmail.com>
Date: Thu, 12 Sep 2024 14:04:00 -0700
From: Suren Baghdasaryan <surenb@...gle.com>
To: linux-trace-kernel@...r.kernel.org, peterz@...radead.org, oleg@...hat.com
Cc: rostedt@...dmis.org, mhiramat@...nel.org, bpf@...r.kernel.org,
linux-kernel@...r.kernel.org, jolsa@...nel.org, paulmck@...nel.org,
willy@...radead.org, akpm@...ux-foundation.org, linux-mm@...ck.org,
mjguzik@...il.com, brauner@...nel.org, jannh@...gle.com, andrii@...nel.org
Subject: Re: [PATCH v2 1/1] mm: introduce mmap_lock_speculation_{start|end}
On Thu, Sep 12, 2024 at 2:02 PM Suren Baghdasaryan <surenb@...gle.com> wrote:
>
> Add helper functions to speculatively perform operations without
> read-locking mmap_lock, expecting that mmap_lock will not be
> write-locked and that the mm will not be modified from under us.
Here you go. I hope I got the ordering right this time around, but I
would feel much better if Jann reviewed it before it's included in
your next patchset :)
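
For reference, here's roughly how I expect callers to use the new
helpers (a minimal sketch; do_speculative_lookup() is a made-up
caller, not something from this patch):

	static bool do_speculative_lookup(struct mm_struct *mm)
	{
		int seq;

		/* Bail out if mmap_lock is currently write-locked */
		if (!mmap_lock_speculation_start(mm, &seq))
			return false;

		/* ... speculatively read mm state without taking mmap_lock ... */

		/* Discard the results if mm_lock_seq changed from under us */
		if (!mmap_lock_speculation_end(mm, seq))
			return false;

		return true;
	}

The smp_load_acquire() in mmap_lock_speculation_start() and the
smp_rmb() in mmap_lock_speculation_end() keep the speculative reads
ordered between the two sequence checks, much like a seqcount reader.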
Thanks,
Suren.
>
> Suggested-by: Peter Zijlstra <peterz@...radead.org>
> Signed-off-by: Suren Baghdasaryan <surenb@...gle.com>
> Signed-off-by: Andrii Nakryiko <andrii@...nel.org>
> ---
> Changes since v1 [1]:
> - Made memory barriers in inc_mm_lock_seq and mmap_lock_speculation_end
> more strict, per Jann Horn
>
> [1] https://lore.kernel.org/all/20240906051205.530219-2-andrii@kernel.org/
>
> include/linux/mm_types.h | 3 ++
> include/linux/mmap_lock.h | 74 ++++++++++++++++++++++++++++++++-------
> kernel/fork.c | 3 --
> 3 files changed, 65 insertions(+), 15 deletions(-)
>
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index 6e3bdf8e38bc..5d8cdebd42bc 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -887,6 +887,9 @@ struct mm_struct {
> * Roughly speaking, incrementing the sequence number is
> * equivalent to releasing locks on VMAs; reading the sequence
> * number can be part of taking a read lock on a VMA.
> + * Incremented every time mmap_lock is write-locked/unlocked.
> + * Initialized to 0, so odd values indicate that mmap_lock
> + * is write-locked and even values that it has been released.
> *
> * Can be modified under write mmap_lock using RELEASE
> * semantics.
> diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
> index de9dc20b01ba..a281519d0c12 100644
> --- a/include/linux/mmap_lock.h
> +++ b/include/linux/mmap_lock.h
> @@ -71,39 +71,86 @@ static inline void mmap_assert_write_locked(const struct mm_struct *mm)
> }
>
> #ifdef CONFIG_PER_VMA_LOCK
> +static inline void init_mm_lock_seq(struct mm_struct *mm)
> +{
> + mm->mm_lock_seq = 0;
> +}
> +
> /*
> - * Drop all currently-held per-VMA locks.
> - * This is called from the mmap_lock implementation directly before releasing
> - * a write-locked mmap_lock (or downgrading it to read-locked).
> - * This should normally NOT be called manually from other places.
> - * If you want to call this manually anyway, keep in mind that this will release
> - * *all* VMA write locks, including ones from further up the stack.
> + * Increment mm->mm_lock_seq when mmap_lock is write-locked (ACQUIRE semantics)
> + * or write-unlocked (RELEASE semantics).
> */
> -static inline void vma_end_write_all(struct mm_struct *mm)
> +static inline void inc_mm_lock_seq(struct mm_struct *mm, bool acquire)
> {
> mmap_assert_write_locked(mm);
> /*
> * Nobody can concurrently modify mm->mm_lock_seq due to exclusive
> * mmap_lock being held.
> - * We need RELEASE semantics here to ensure that preceding stores into
> - * the VMA take effect before we unlock it with this store.
> - * Pairs with ACQUIRE semantics in vma_start_read().
> */
> - smp_store_release(&mm->mm_lock_seq, mm->mm_lock_seq + 1);
> +
> + if (acquire) {
> + WRITE_ONCE(mm->mm_lock_seq, mm->mm_lock_seq + 1);
> + /*
> + * For ACQUIRE semantics we need to ensure that no subsequent stores
> + * are reordered to appear before the mm->mm_lock_seq modification.
> + */
> + smp_wmb();
> + } else {
> + /*
> + * We need RELEASE semantics here to ensure that preceding stores
> + * into the VMA take effect before we unlock it with this store.
> + * Pairs with ACQUIRE semantics in vma_start_read().
> + */
> + smp_store_release(&mm->mm_lock_seq, mm->mm_lock_seq + 1);
> + }
> +}
> +
> +static inline bool mmap_lock_speculation_start(struct mm_struct *mm, int *seq)
> +{
> + /* Pairs with RELEASE semantics in inc_mm_lock_seq(). */
> + *seq = smp_load_acquire(&mm->mm_lock_seq);
> + /* Allow speculation if mmap_lock is not write-locked */
> + return (*seq & 1) == 0;
> +}
> +
> +static inline bool mmap_lock_speculation_end(struct mm_struct *mm, int seq)
> +{
> + /* Pairs with ACQUIRE semantics in inc_mm_lock_seq(). */
> + smp_rmb();
> + return seq == READ_ONCE(mm->mm_lock_seq);
> }
> +
> #else
> -static inline void vma_end_write_all(struct mm_struct *mm) {}
> +static inline void init_mm_lock_seq(struct mm_struct *mm) {}
> +static inline void inc_mm_lock_seq(struct mm_struct *mm, bool acquire) {}
> +static inline bool mmap_lock_speculation_start(struct mm_struct *mm, int *seq) { return false; }
> +static inline bool mmap_lock_speculation_end(struct mm_struct *mm, int seq) { return false; }
> #endif
>
> +/*
> + * Drop all currently-held per-VMA locks.
> + * This is called from the mmap_lock implementation directly before releasing
> + * a write-locked mmap_lock (or downgrading it to read-locked).
> + * This should normally NOT be called manually from other places.
> + * If you want to call this manually anyway, keep in mind that this will release
> + * *all* VMA write locks, including ones from further up the stack.
> + */
> +static inline void vma_end_write_all(struct mm_struct *mm)
> +{
> + inc_mm_lock_seq(mm, false);
> +}
> +
> static inline void mmap_init_lock(struct mm_struct *mm)
> {
> init_rwsem(&mm->mmap_lock);
> + init_mm_lock_seq(mm);
> }
>
> static inline void mmap_write_lock(struct mm_struct *mm)
> {
> __mmap_lock_trace_start_locking(mm, true);
> down_write(&mm->mmap_lock);
> + inc_mm_lock_seq(mm, true);
> __mmap_lock_trace_acquire_returned(mm, true, true);
> }
>
> @@ -111,6 +158,7 @@ static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
> {
> __mmap_lock_trace_start_locking(mm, true);
> down_write_nested(&mm->mmap_lock, subclass);
> + inc_mm_lock_seq(mm, true);
> __mmap_lock_trace_acquire_returned(mm, true, true);
> }
>
> @@ -120,6 +168,8 @@ static inline int mmap_write_lock_killable(struct mm_struct *mm)
>
> __mmap_lock_trace_start_locking(mm, true);
> ret = down_write_killable(&mm->mmap_lock);
> + if (!ret)
> + inc_mm_lock_seq(mm, true);
> __mmap_lock_trace_acquire_returned(mm, true, ret == 0);
> return ret;
> }
> diff --git a/kernel/fork.c b/kernel/fork.c
> index 61070248a7d3..c86e87ed172b 100644
> --- a/kernel/fork.c
> +++ b/kernel/fork.c
> @@ -1259,9 +1259,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
> seqcount_init(&mm->write_protect_seq);
> mmap_init_lock(mm);
> INIT_LIST_HEAD(&mm->mmlist);
> -#ifdef CONFIG_PER_VMA_LOCK
> - mm->mm_lock_seq = 0;
> -#endif
> mm_pgtables_bytes_init(mm);
> mm->map_count = 0;
> mm->locked_vm = 0;
>
> base-commit: 015bdfcb183759674ba1bd732c3393014e35708b
> --
> 2.46.0.662.g92d0881bb0-goog
>