Message-Id: <20241104135519.582252957@infradead.org>
Date: Mon, 04 Nov 2024 14:39:27 +0100
From: Peter Zijlstra <peterz@...radead.org>
To: mingo@...nel.org,
lucas.demarchi@...el.com
Cc: linux-kernel@...r.kernel.org,
peterz@...radead.org,
willy@...radead.org,
acme@...nel.org,
namhyung@...nel.org,
mark.rutland@....com,
alexander.shishkin@...ux.intel.com,
jolsa@...nel.org,
irogers@...gle.com,
adrian.hunter@...el.com,
kan.liang@...ux.intel.com
Subject: [PATCH 18/19] perf: Lift event->mmap_mutex in perf_mmap()

This puts 'all' of perf_mmap() under a single event->mmap_mutex.
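Purely as an illustration of the locking pattern (not part of the patch), here is a
minimal userspace sketch: the mutex is taken once at the top of the function and
every error path funnels through a single unlock label, instead of locking separately
in individual branches and returning early. All names here (struct foo,
foo_map_region) are hypothetical, and pthreads stands in for the kernel mutex API.

	/*
	 * Sketch only: lift the lock to cover the whole function and use a
	 * single exit path, mirroring the restructuring done to perf_mmap().
	 */
	#include <pthread.h>
	#include <stdio.h>

	struct foo {
		pthread_mutex_t lock;
		int nr_pages;		/* pages currently mapped; 0 = none */
	};

	static int foo_map_region(struct foo *f, int nr_pages)
	{
		int ret;

		pthread_mutex_lock(&f->lock);
		ret = -1;			/* default to failure, like ret = -EINVAL */

		/* Reject sizes that are not a power of two. */
		if (nr_pages != 0 && (nr_pages & (nr_pages - 1)))
			goto unlock;

		/* An existing mapping must match the requested size. */
		if (f->nr_pages && f->nr_pages != nr_pages)
			goto unlock;

		f->nr_pages = nr_pages;
		ret = 0;			/* success is set in exactly one place */
	unlock:
		pthread_mutex_unlock(&f->lock);
		return ret;
	}

	int main(void)
	{
		struct foo f = { .lock = PTHREAD_MUTEX_INITIALIZER, .nr_pages = 0 };

		printf("map 8 pages: %d\n", foo_map_region(&f, 8));	/* 0 */
		printf("map 6 pages: %d\n", foo_map_region(&f, 6));	/* -1: not a power of 2 */
		printf("map 4 pages: %d\n", foo_map_region(&f, 4));	/* -1: size mismatch */
		return 0;
	}

The benefit of the single critical section is the same as in the patch below: the
error paths no longer need to remember whether the lock is held, and "ret" is only
set to success at one point just before the unlock label.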
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
kernel/events/core.c | 20 ++++++++------------
1 file changed, 8 insertions(+), 12 deletions(-)
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6626,7 +6626,7 @@ static int perf_mmap(struct file *file,
unsigned long vma_size;
unsigned long nr_pages;
long user_extra = 0, extra = 0;
- int ret = 0, flags = 0;
+ int ret, flags = 0;
/*
* Don't allow mmap() of inherited per-task counters. This would
@@ -6654,6 +6654,9 @@ static int perf_mmap(struct file *file,
user_extra = nr_pages;
+ mutex_lock(&event->mmap_mutex);
+ ret = -EINVAL;
+
if (vma->vm_pgoff == 0) {
nr_pages -= 1;
@@ -6662,16 +6665,13 @@ static int perf_mmap(struct file *file,
* can do bitmasks instead of modulo.
*/
if (nr_pages != 0 && !is_power_of_2(nr_pages))
- return -EINVAL;
+ goto unlock;
WARN_ON_ONCE(event->ctx->parent_ctx);
- mutex_lock(&event->mmap_mutex);
if (event->rb) {
- if (data_page_nr(event->rb) != nr_pages) {
- ret = -EINVAL;
+ if (data_page_nr(event->rb) != nr_pages)
goto unlock;
- }
if (atomic_inc_not_zero(&event->rb->mmap_count)) {
/*
@@ -6698,12 +6698,6 @@ static int perf_mmap(struct file *file,
*/
u64 aux_offset, aux_size;
- if (!event->rb)
- return -EINVAL;
-
- mutex_lock(&event->mmap_mutex);
- ret = -EINVAL;
-
rb = event->rb;
if (!rb)
goto aux_unlock;
@@ -6813,6 +6807,8 @@ static int perf_mmap(struct file *file,
rb->aux_mmap_locked = extra;
}
+ ret = 0;
+
unlock:
if (!ret) {
atomic_long_add(user_extra, &user->locked_vm);