Message-ID: <20240524041032.1048094-5-andrii@kernel.org>
Date: Thu, 23 May 2024 21:10:26 -0700
From: Andrii Nakryiko <andrii@...nel.org>
To: linux-fsdevel@...r.kernel.org,
	brauner@...nel.org,
	viro@...iv.linux.org.uk,
	akpm@...ux-foundation.org
Cc: linux-kernel@...r.kernel.org,
	bpf@...r.kernel.org,
	gregkh@...uxfoundation.org,
	linux-mm@...ck.org,
	liam.howlett@...cle.com,
	surenb@...gle.com,
	rppt@...nel.org,
	Andrii Nakryiko <andrii@...nel.org>
Subject: [PATCH v2 4/9] fs/procfs: use per-VMA RCU-protected locking in PROCMAP_QUERY API

Attempt to use the RCU-protected per-VMA lock when looking up the requested
VMA whenever possible, falling back to mmap_lock only if taking the per-VMA
lock fails. This is done so that querying VMAs doesn't interfere with other
critical tasks, like page fault handling.

This approach was suggested by the mm folks, and it makes use of a newly
added internal API that works like find_vma() but tries to use the per-VMA
lock first.
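
For reference, the lookup pattern boils down to the following sketch
(simplified from the hunk below; find_and_lock_vma_rcu() is the helper
introduced earlier in this series, and mmap_locked is a flag tracked by
the caller):

	vma = find_and_lock_vma_rcu(mm, addr);	/* per-VMA, RCU-protected */
	if (IS_ERR(vma)) {
		/* per-VMA lock unavailable, fall back to mmap_lock */
		if (mmap_read_lock_killable(mm))
			return ERR_PTR(-EINTR);
		mmap_locked = true;
		vma = find_vma(mm, addr);
	}
	/* when done: vma_end_read(vma) or mmap_read_unlock(mm), respectively */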

Signed-off-by: Andrii Nakryiko <andrii@...nel.org>
---
 fs/proc/task_mmu.c | 42 ++++++++++++++++++++++++++++++++++--------
 1 file changed, 34 insertions(+), 8 deletions(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 8ad547efd38d..2b14d06d1def 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -389,12 +389,30 @@ static int pid_maps_open(struct inode *inode, struct file *file)
 )
 
 static struct vm_area_struct *query_matching_vma(struct mm_struct *mm,
-						 unsigned long addr, u32 flags)
+						 unsigned long addr, u32 flags,
+						 bool *mm_locked)
 {
 	struct vm_area_struct *vma;
+	bool mmap_locked;
+
+	*mm_locked = mmap_locked = false;
 
 next_vma:
-	vma = find_vma(mm, addr);
+	if (!mmap_locked) {
+		/* if we haven't yet acquired mmap_lock, try to use less disruptive per-VMA */
+		vma = find_and_lock_vma_rcu(mm, addr);
+		if (IS_ERR(vma)) {
+			/* failed to take per-VMA lock, fallback to mmap_lock */
+			if (mmap_read_lock_killable(mm))
+				return ERR_PTR(-EINTR);
+
+			*mm_locked = mmap_locked = true;
+			vma = find_vma(mm, addr);
+		}
+	} else {
+		/* if we have mmap_lock, get through the search as fast as possible */
+		vma = find_vma(mm, addr);
+	}
 
 	/* no VMA found */
 	if (!vma)
@@ -428,18 +446,25 @@ static struct vm_area_struct *query_matching_vma(struct mm_struct *mm,
 skip_vma:
 	/*
 	 * If the user needs closest matching VMA, keep iterating.
+	 * But before we proceed we might need to unlock current VMA.
 	 */
 	addr = vma->vm_end;
+	if (!mmap_locked)
+		vma_end_read(vma);
 	if (flags & PROCMAP_QUERY_COVERING_OR_NEXT_VMA)
 		goto next_vma;
 no_vma:
-	mmap_read_unlock(mm);
+	if (mmap_locked)
+		mmap_read_unlock(mm);
 	return ERR_PTR(-ENOENT);
 }
 
-static void unlock_vma(struct vm_area_struct *vma)
+static void unlock_vma(struct vm_area_struct *vma, bool mm_locked)
 {
-	mmap_read_unlock(vma->vm_mm);
+	if (mm_locked)
+		mmap_read_unlock(vma->vm_mm);
+	else
+		vma_end_read(vma);
 }
 
 static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
@@ -447,6 +472,7 @@ static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
 	struct procmap_query karg;
 	struct vm_area_struct *vma;
 	struct mm_struct *mm;
+	bool mm_locked;
 	const char *name = NULL;
 	char *name_buf = NULL;
 	__u64 usize;
@@ -475,7 +501,7 @@ static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
 	if (!mm || !mmget_not_zero(mm))
 		return -ESRCH;
 
-	vma = query_matching_vma(mm, karg.query_addr, karg.query_flags);
+	vma = query_matching_vma(mm, karg.query_addr, karg.query_flags, &mm_locked);
 	if (IS_ERR(vma)) {
 		mmput(mm);
 		return PTR_ERR(vma);
@@ -542,7 +568,7 @@ static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
 	}
 
 	/* unlock vma/mm_struct and put mm_struct before copying data to user */
-	unlock_vma(vma);
+	unlock_vma(vma, mm_locked);
 	mmput(mm);
 
 	if (karg.vma_name_size && copy_to_user((void __user *)karg.vma_name_addr,
@@ -558,7 +584,7 @@ static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
 	return 0;
 
 out:
-	unlock_vma(vma);
+	unlock_vma(vma, mm_locked);
 	mmput(mm);
 	kfree(name_buf);
 	return err;
-- 
2.43.0

