lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <07122584007a6efbaa47d115030ac27947df700a.1768569863.git.lorenzo.stoakes@oracle.com>
Date: Fri, 16 Jan 2026 13:36:46 +0000
From: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: David Hildenbrand <david@...nel.org>,
        "Liam R . Howlett" <Liam.Howlett@...cle.com>,
        Vlastimil Babka <vbabka@...e.cz>, Mike Rapoport <rppt@...nel.org>,
        Suren Baghdasaryan <surenb@...gle.com>, Michal Hocko <mhocko@...e.com>,
        Shakeel Butt <shakeel.butt@...ux.dev>, Jann Horn <jannh@...gle.com>,
        linux-mm@...ck.org, linux-kernel@...r.kernel.org,
        linux-rt-devel@...ts.linux.dev, Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...hat.com>, Will Deacon <will@...nel.org>,
        Boqun Feng <boqun.feng@...il.com>, Waiman Long <longman@...hat.com>,
        Sebastian Andrzej Siewior <bigeasy@...utronix.de>,
        Clark Williams <clrkwllms@...nel.org>,
        Steven Rostedt <rostedt@...dmis.org>
Subject: [PATCH RESEND 2/3] mm/vma: add vma_is_*_locked() helpers

Add vma_is_read_locked(), vma_is_write_locked() and vma_is_locked() helpers
and utilise them in vma_assert_locked() and vma_assert_write_locked().

We need to examine mmap lock state in order to correctly determine VMA write
lock state. Add mmap_is_locked() and mmap_is_write_locked() to provide an
explicit means of checking mmap_lock state as well.

These functions will intentionally not be defined if CONFIG_PER_VMA_LOCK is
not set, as they would not make any sense in a context where VMA locks do
not exist.

We are careful when invoking __is_vma_write_locked() - since this function
asserts that the mmap write lock is held, we first check that this lock is
held before invoking it, allowing vma_is_write_locked() to be used in
situations where we don't want an assert failure.

While we're here, we also update __is_vma_write_locked() to accept a const
vm_area_struct pointer so we can consistently have const VMA parameters for
these helpers.

As part of this change we also move mmap_lock_is_contended() up in
include/linux/mmap_lock.h (changing its return type from int to bool) so
that predicates based on mmap lock state are grouped together.

This lays the groundwork for a subsequent change that allows for asserting
that either the mmap lock or VMA lock is held.

Suggested-by: Suren Baghdasaryan <surenb@...gle.com>
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
---
 include/linux/mmap_lock.h | 50 +++++++++++++++++++++++++++++----------
 1 file changed, 38 insertions(+), 12 deletions(-)

diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index b50416fbba20..9f6932ffaaa0 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -66,6 +66,22 @@ static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)

 #endif /* CONFIG_TRACING */

+
+static inline bool mmap_lock_is_contended(struct mm_struct *mm)
+{
+	return rwsem_is_contended(&mm->mmap_lock);
+}
+
+static inline bool mmap_is_locked(const struct mm_struct *mm)
+{
+	return rwsem_is_locked(&mm->mmap_lock);
+}
+
+static inline bool mmap_is_write_locked(const struct mm_struct *mm)
+{
+	return rwsem_is_write_locked(&mm->mmap_lock);
+}
+
 static inline void mmap_assert_locked(const struct mm_struct *mm)
 {
 	rwsem_assert_held(&mm->mmap_lock);
@@ -183,7 +199,8 @@ static inline void vma_end_read(struct vm_area_struct *vma)
 }

 /* WARNING! Can only be used if mmap_lock is expected to be write-locked */
-static inline bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_lock_seq)
+static inline bool __is_vma_write_locked(const struct vm_area_struct *vma,
+		unsigned int *mm_lock_seq)
 {
 	mmap_assert_write_locked(vma->vm_mm);

@@ -236,19 +253,33 @@ int vma_start_write_killable(struct vm_area_struct *vma)
 	return __vma_start_write(vma, mm_lock_seq, TASK_KILLABLE);
 }

-static inline void vma_assert_write_locked(struct vm_area_struct *vma)
+static inline bool vma_is_read_locked(const struct vm_area_struct *vma)
+{
+	return refcount_read(&vma->vm_refcnt) > 1;
+}
+
+static inline bool vma_is_write_locked(struct vm_area_struct *vma)
 {
 	unsigned int mm_lock_seq;

-	VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
+	/* __is_vma_write_locked() requires the mmap write lock. */
+	return mmap_is_write_locked(vma->vm_mm) &&
+		__is_vma_write_locked(vma, &mm_lock_seq);
 }

-static inline void vma_assert_locked(struct vm_area_struct *vma)
+static inline bool vma_is_locked(struct vm_area_struct *vma)
 {
-	unsigned int mm_lock_seq;
+	return vma_is_read_locked(vma) || vma_is_write_locked(vma);
+}
+
+static inline void vma_assert_write_locked(struct vm_area_struct *vma)
+{
+	VM_BUG_ON_VMA(!vma_is_write_locked(vma), vma);
+}

-	VM_BUG_ON_VMA(refcount_read(&vma->vm_refcnt) <= 1 &&
-		      !__is_vma_write_locked(vma, &mm_lock_seq), vma);
+static inline void vma_assert_locked(struct vm_area_struct *vma)
+{
+	VM_BUG_ON_VMA(!vma_is_locked(vma), vma);
 }

 static inline bool vma_is_attached(struct vm_area_struct *vma)
@@ -432,9 +463,4 @@ static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
 	up_read_non_owner(&mm->mmap_lock);
 }

-static inline int mmap_lock_is_contended(struct mm_struct *mm)
-{
-	return rwsem_is_contended(&mm->mmap_lock);
-}
-
 #endif /* _LINUX_MMAP_LOCK_H */
--
2.52.0

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ