Message-ID: <7ff13c42220bc3fc19610e93d5b73637146aa7ae.1752497324.git.lorenzo.stoakes@oracle.com>
Date: Mon, 14 Jul 2025 14:00:40 +0100
From: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: "Liam R . Howlett" <Liam.Howlett@...cle.com>,
        David Hildenbrand <david@...hat.com>, Vlastimil Babka <vbabka@...e.cz>,
        Jann Horn <jannh@...gle.com>, Pedro Falcato <pfalcato@...e.de>,
        linux-mm@...ck.org, linux-kernel@...r.kernel.org,
        Jeff Xu <jeffxu@...omium.org>
Subject: [PATCH 5/5] mm/mseal: rework mseal apply logic

The logic can be simplified - firstly by renaming the inconsistently named
apply_mm_seal() to mseal_apply().

We then fold mseal_fixup() into the main loop, as the logic is simple enough
not to require a separate helper; equally, it isn't a hugely pleasant pattern
in mprotect() etc., so it's not something we want to perpetuate.

We remove some redundant comments, and avoid the entirely unnecessary and
slightly bizarre invocation of vma_iter_end() on each loop iteration. Really
what we want, given we have asserted there are no gaps in the range, is to
handle start and end being offset into a VMA, which is easily done with
MIN()/MAX().
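
As a purely illustrative, user-space sketch of that clamping idea (the region
layout and addresses are made up; this is not kernel code):

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

struct region {
	unsigned long start;
	unsigned long end;
};

int main(void)
{
	/* Hypothetical contiguous regions covering [0x1000, 0x4000). */
	struct region regions[] = {
		{ 0x1000, 0x2000 },
		{ 0x2000, 0x3000 },
		{ 0x3000, 0x4000 },
	};
	unsigned long start = 0x1800, end = 0x3800;
	size_t i;

	for (i = 0; i < sizeof(regions) / sizeof(regions[0]); i++) {
		/* Clamp the request to the part overlapping this region. */
		unsigned long curr_start = MAX(start, regions[i].start);
		unsigned long curr_end = MIN(regions[i].end, end);

		if (curr_start < curr_end)
			printf("seal [%#lx, %#lx)\n", curr_start, curr_end);
	}
	return 0;
}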

There's no need for an 'out' label, since on vma_modify_flags() error we
abort anyway.

And by refactoring like this we avoid the rather horrid 'pass pointer to
prev around' pattern used in mprotect() et al.
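
For contrast, a minimal user-space sketch of the two shapes (hypothetical
names, not kernel code): the old shape has to pass prev by pointer so the
helper can update it, while the reworked loop just assigns a local.

#include <stdio.h>

struct node {
	int value;
};

/* Old shape: the helper takes struct node **prev purely to update it. */
static void fixup_one(struct node *node, struct node **prev)
{
	node->value += 1;
	*prev = node;
}

int main(void)
{
	struct node nodes[3] = { { 1 }, { 2 }, { 3 } };
	struct node *prev = NULL;
	int i;

	/* Old shape: prev is threaded through the helper by pointer. */
	for (i = 0; i < 3; i++)
		fixup_one(&nodes[i], &prev);

	/* New shape: do the work inline and track prev with a local. */
	prev = NULL;
	for (i = 0; i < 3; i++) {
		nodes[i].value += 1;
		prev = &nodes[i];
	}

	printf("last value: %d\n", prev->value);
	return 0;
}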

No functional change intended.

Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
---
 mm/mseal.c | 69 +++++++++++++++++-------------------------------------
 1 file changed, 22 insertions(+), 47 deletions(-)

diff --git a/mm/mseal.c b/mm/mseal.c
index 8e4c605af700..cf28efbac371 100644
--- a/mm/mseal.c
+++ b/mm/mseal.c
@@ -15,60 +15,35 @@
 #include <linux/sched.h>
 #include "internal.h"

-static int mseal_fixup(struct vma_iterator *vmi, struct vm_area_struct *vma,
-		struct vm_area_struct **prev, unsigned long start,
-		unsigned long end, vm_flags_t newflags)
+static int mseal_apply(struct mm_struct *mm,
+		unsigned long start, unsigned long end)
 {
-	int ret = 0;
-	vm_flags_t oldflags = vma->vm_flags;
-
-	if (newflags == oldflags)
-		goto out;
-
-	vma = vma_modify_flags(vmi, *prev, vma, start, end, newflags);
-	if (IS_ERR(vma)) {
-		ret = PTR_ERR(vma);
-		goto out;
-	}
-
-	vm_flags_set(vma, VM_SEALED);
-out:
-	*prev = vma;
-	return ret;
-}
-
-/*
- * Apply sealing.
- */
-static int apply_mm_seal(unsigned long start, unsigned long end)
-{
-	unsigned long nstart;
 	struct vm_area_struct *vma, *prev;
-	VMA_ITERATOR(vmi, current->mm, start);
+	VMA_ITERATOR(vmi, mm, start);

+	/* We know there are no gaps so this will be non-NULL. */
 	vma = vma_iter_load(&vmi);
-	/*
-	 * Note: check_mm_seal should already checked ENOMEM case.
-	 * so vma should not be null, same for the other ENOMEM cases.
-	 */
 	prev = vma_prev(&vmi);
 	if (start > vma->vm_start)
 		prev = vma;

-	nstart = start;
 	for_each_vma_range(vmi, vma, end) {
-		int error;
-		unsigned long tmp;
-		vm_flags_t newflags;
-
-		newflags = vma->vm_flags | VM_SEALED;
-		tmp = vma->vm_end;
-		if (tmp > end)
-			tmp = end;
-		error = mseal_fixup(&vmi, vma, &prev, nstart, tmp, newflags);
-		if (error)
-			return error;
-		nstart = vma_iter_end(&vmi);
+		unsigned long curr_start, curr_end;
+
+		if (vma->vm_flags & VM_SEALED) {
+			prev = vma;
+			continue;
+		}
+		curr_start = MAX(start, vma->vm_start);
+		curr_end = MIN(vma->vm_end, end);
+
+		vma = vma_modify_flags(&vmi, prev, vma, curr_start, curr_end,
+				vma->vm_flags | VM_SEALED);
+		if (IS_ERR(vma))
+			return PTR_ERR(vma);
+		vm_flags_set(vma, VM_SEALED);
+
+		prev = vma;
 	}

 	return 0;
@@ -167,10 +142,10 @@ int do_mseal(unsigned long start, size_t len_in, unsigned long flags)
 	 * reaching the max supported VMAs, however, those cases shall
 	 * be rare.
 	 */
-	ret = apply_mm_seal(start, end);
+	ret = mseal_apply(mm, start, end);

 out:
-	mmap_write_unlock(current->mm);
+	mmap_write_unlock(mm);
 	return ret;
 }

--
2.50.1
