Message-ID: <20250812104019.494205648@infradead.org>
Date: Tue, 12 Aug 2025 12:39:08 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: tglx@...utronix.de
Cc: linux-kernel@...r.kernel.org,
 peterz@...radead.org,
 torvalds@...uxfoundation.org,
 mingo@...nel.org,
 namhyung@...nel.org,
 acme@...hat.com,
 kees@...nel.org
Subject: [PATCH v3 10/15] perf: Split out the AUX buffer allocation

Move the AUX buffer allocation branch of perf_mmap() into its own
function. With rb->aux_mutex held by a scoped guard, the error paths
of the new helper can return directly instead of jumping to the
unlock label in perf_mmap().

Originally-by: Thomas Gleixner <tglx@...utronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
 kernel/events/core.c |  144 +++++++++++++++++++++++++++------------------------
 1 file changed, 77 insertions(+), 67 deletions(-)
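
A note on locking, not part of the patch: the new helper can return
directly on its error paths because the kernel's guard(mutex)()
scope-based cleanup helper drops rb->aux_mutex automatically when the
function exits, while perf_mmap() keeps handling event->mmap_mutex at
its unlock label. A minimal userspace sketch of that cleanup idiom,
assuming GCC/Clang's cleanup attribute and pthreads:

#include <pthread.h>
#include <stdio.h>

static void unlock_cleanup(pthread_mutex_t **m)
{
	if (*m)
		pthread_mutex_unlock(*m);
}

/* Lock now; the compiler runs unlock_cleanup() at end of scope. */
#define GUARD_MUTEX(name, m) \
	pthread_mutex_t *name __attribute__((cleanup(unlock_cleanup))) = (m); \
	pthread_mutex_lock(name)

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int validate(int arg)
{
	GUARD_MUTEX(g, &lock);

	if (arg < 0)
		return -1;	/* lock dropped here, too */

	printf("arg %d accepted\n", arg);
	return 0;		/* ...and here */
}

int main(void)
{
	return validate(42);
}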

--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6970,6 +6970,82 @@ static void perf_mmap_account(struct vm_
 	atomic64_add(extra, &vma->vm_mm->pinned_vm);
 }
 
+static int perf_mmap_aux(struct vm_area_struct *vma, struct perf_event *event,
+			 unsigned long nr_pages)
+{
+	long extra = 0, user_extra = nr_pages;
+	u64 aux_offset, aux_size;
+	struct perf_buffer *rb;
+	int ret, rb_flags = 0;
+
+	rb = event->rb;
+	if (!rb)
+		return -EINVAL;
+
+	guard(mutex)(&rb->aux_mutex);
+
+	/*
+	 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
+	 * mapped, all subsequent mappings should have the same size
+	 * and offset. Must be above the normal perf buffer.
+	 */
+	aux_offset = READ_ONCE(rb->user_page->aux_offset);
+	aux_size = READ_ONCE(rb->user_page->aux_size);
+
+	if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
+		return -EINVAL;
+
+	if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
+		return -EINVAL;
+
+	/* already mapped with a different offset */
+	if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
+		return -EINVAL;
+
+	if (aux_size != nr_pages * PAGE_SIZE)
+		return -EINVAL;
+
+	/* already mapped with a different size */
+	if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
+		return -EINVAL;
+
+	if (!is_power_of_2(nr_pages))
+		return -EINVAL;
+
+	if (!atomic_inc_not_zero(&rb->mmap_count))
+		return -EINVAL;
+
+	if (rb_has_aux(rb)) {
+		atomic_inc(&rb->aux_mmap_count);
+
+	} else {
+		if (!perf_mmap_calc_limits(vma, &user_extra, &extra)) {
+			atomic_dec(&rb->mmap_count);
+			return -EPERM;
+		}
+
+		WARN_ON(!rb && event->rb);
+
+		if (vma->vm_flags & VM_WRITE)
+			rb_flags |= RING_BUFFER_WRITABLE;
+
+		ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
+				   event->attr.aux_watermark, rb_flags);
+		if (ret) {
+			atomic_dec(&rb->mmap_count);
+			return ret;
+		}
+
+		atomic_set(&rb->aux_mmap_count, 1);
+		rb->aux_mmap_locked = extra;
+	}
+
+	perf_mmap_account(vma, user_extra, extra);
+	atomic_inc(&event->mmap_count);
+
+	return 0;
+}
+
 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct perf_event *event = file->private_data;
@@ -7087,73 +7163,7 @@ static int perf_mmap(struct file *file,
 		perf_mmap_account(vma, user_extra, extra);
 		atomic_inc(&event->mmap_count);
 	} else {
-		/*
-		 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
-		 * mapped, all subsequent mappings should have the same size
-		 * and offset. Must be above the normal perf buffer.
-		 */
-		u64 aux_offset, aux_size;
-
-		rb = event->rb;
-		if (!rb)
-			goto unlock;
-
-		guard(mutex)(&rb->aux_mutex);
-
-		aux_offset = READ_ONCE(rb->user_page->aux_offset);
-		aux_size = READ_ONCE(rb->user_page->aux_size);
-
-		if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
-			goto unlock;
-
-		if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
-			goto unlock;
-
-		/* already mapped with a different offset */
-		if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
-			goto unlock;
-
-		if (aux_size != nr_pages * PAGE_SIZE)
-			goto unlock;
-
-		/* already mapped with a different size */
-		if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
-			goto unlock;
-
-		if (!is_power_of_2(nr_pages))
-			goto unlock;
-
-		if (!atomic_inc_not_zero(&rb->mmap_count))
-			goto unlock;
-
-		if (rb_has_aux(rb)) {
-			atomic_inc(&rb->aux_mmap_count);
-			ret = 0;
-
-		} else {
-			if (!perf_mmap_calc_limits(vma, &user_extra, &extra)) {
-				ret = -EPERM;
-				atomic_dec(&rb->mmap_count);
-				goto unlock;
-			}
-
-			WARN_ON(!rb && event->rb);
-
-			if (vma->vm_flags & VM_WRITE)
-				flags |= RING_BUFFER_WRITABLE;
-
-			ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
-					   event->attr.aux_watermark, flags);
-			if (ret) {
-				atomic_dec(&rb->mmap_count);
-				goto unlock;
-			}
-
-			atomic_set(&rb->aux_mmap_count, 1);
-			rb->aux_mmap_locked = extra;
-		}
-		perf_mmap_account(vma, user_extra, extra);
-		atomic_inc(&event->mmap_count);
+		ret = perf_mmap_aux(vma, event, nr_pages);
 	}
 
 unlock:



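For reference, perf_mmap_aux() accepts a mapping only when the AUX
area sits above the user page plus the data buffer, starts exactly at
the mmap() offset, and spans a power-of-two number of pages. A toy
model of that arithmetic, with a 4 KiB page size and made-up sizes,
not part of the patch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_SHIFT	12

static bool is_power_of_2(unsigned long n)
{
	return n && !(n & (n - 1));
}

/* Mirrors the offset/size checks; data_size plays perf_data_size(rb). */
static bool aux_mapping_ok(uint64_t aux_offset, uint64_t aux_size,
			   unsigned long data_size, unsigned long vm_pgoff,
			   unsigned long nr_pages)
{
	if (aux_offset < data_size + PAGE_SIZE)	/* above user page + data */
		return false;
	if (aux_offset != (uint64_t)vm_pgoff << PAGE_SHIFT)
		return false;
	if (aux_size != nr_pages * PAGE_SIZE)
		return false;
	return is_power_of_2(nr_pages);
}

int main(void)
{
	/* 16 data pages + 1 user page: AUX may start at page 17. */
	printf("%d\n", aux_mapping_ok(17 * PAGE_SIZE, 64 * PAGE_SIZE,
				      16 * PAGE_SIZE, 17, 64));
	return 0;
}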