Message-ID: <20250812104020.071507932@infradead.org>
Date: Tue, 12 Aug 2025 12:39:13 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: tglx@...utronix.de
Cc: linux-kernel@...r.kernel.org,
peterz@...radead.org,
torvalds@...uxfoundation.org,
mingo@...nel.org,
namhyung@...nel.org,
acme@...hat.com,
kees@...nel.org
Subject: [PATCH v3 15/15] perf: Convert mmap() refcounts to refcount_t
From: Thomas Gleixner <tglx@...utronix.de>
The recently fixed reference count leaks could have been detected by using
refcount_t, and refcount_t would at least have mitigated the potential
overflow.

Now that the code is properly structured, convert the mmap()-related
mmap_count variants over to refcount_t.

No functional change intended.
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Link: https://lkml.kernel.org/r/20250811070620.716309215@linutronix.de
---
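[ Reviewer note, not part of the commit: a minimal sketch of the
  refcount_t semantics the changelog relies on. The "example" counter
  and its helpers below are made up for illustration; they are not
  perf code. ]

	#include <linux/printk.h>
	#include <linux/refcount.h>

	static refcount_t example = REFCOUNT_INIT(1);

	static void example_get(void)
	{
		/*
		 * Unlike atomic_inc(), refcount_inc() WARNs on an
		 * increment from zero (i.e. a stale reference to an
		 * object whose last reference was already dropped)
		 * and saturates at REFCOUNT_SATURATED instead of
		 * wrapping around. That is the leak detection and
		 * overflow mitigation the changelog refers to.
		 */
		refcount_inc(&example);
	}

	static void example_put(void)
	{
		/* WARNs if the count would underflow past zero */
		if (refcount_dec_and_test(&example))
			pr_debug("last reference dropped\n");
	}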
include/linux/perf_event.h | 2 +-
kernel/events/core.c | 40 ++++++++++++++++++++--------------------
kernel/events/internal.h | 4 ++--
kernel/events/ring_buffer.c | 2 +-
4 files changed, 24 insertions(+), 24 deletions(-)
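[ Reviewer note, not part of the commit: the conversion below is
  mechanical because the refcount_t helpers mirror the atomic_t
  calling conventions used in the mmap() paths. A sketch of the
  get/put race pattern, with a hypothetical "struct obj" standing in
  for struct perf_buffer: ]

	#include <linux/mutex.h>
	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct obj {
		refcount_t	mmap_count;
		struct mutex	lock;
	};

	/* mmap() path: take a reference only if the buffer is still live */
	static bool obj_get_live(struct obj *o)
	{
		return refcount_inc_not_zero(&o->mmap_count);
	}

	/* munmap() path: the last reference tears down under the mutex */
	static void obj_put(struct obj *o)
	{
		if (refcount_dec_and_mutex_lock(&o->mmap_count, &o->lock)) {
			/* detach while holding o->lock, then free */
			mutex_unlock(&o->lock);
			kfree(o);
		}
	}

  refcount_inc_not_zero() and refcount_dec_and_mutex_lock() are
  drop-in replacements for atomic_inc_not_zero() and
  atomic_dec_and_mutex_lock(), so no locking or lifetime rules change
  in this patch.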
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -859,7 +859,7 @@ struct perf_event {
/* mmap bits */
struct mutex mmap_mutex;
- atomic_t mmap_count;
+ refcount_t mmap_count;
struct perf_buffer *rb;
struct list_head rb_entry;
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3968,7 +3968,7 @@ static noinline int visit_groups_merge(s
*/
static inline bool event_update_userpage(struct perf_event *event)
{
- if (likely(!atomic_read(&event->mmap_count)))
+ if (likely(!refcount_read(&event->mmap_count)))
return false;
perf_event_update_time(event);
@@ -6704,11 +6704,11 @@ static void perf_mmap_open(struct vm_are
struct perf_event *event = vma->vm_file->private_data;
mapped_f mapped = get_mapped(event, event_mapped);
- atomic_inc(&event->mmap_count);
- atomic_inc(&event->rb->mmap_count);
+ refcount_inc(&event->mmap_count);
+ refcount_inc(&event->rb->mmap_count);
if (vma->vm_pgoff)
- atomic_inc(&event->rb->aux_mmap_count);
+ refcount_inc(&event->rb->aux_mmap_count);
if (mapped)
mapped(event, vma->vm_mm);
@@ -6743,7 +6743,7 @@ static void perf_mmap_close(struct vm_ar
* to avoid complications.
*/
if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
- atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &rb->aux_mutex)) {
+ refcount_dec_and_mutex_lock(&rb->aux_mmap_count, &rb->aux_mutex)) {
/*
* Stop all AUX events that are writing to this buffer,
* so that we can free its AUX pages and corresponding PMU
@@ -6763,10 +6763,10 @@ static void perf_mmap_close(struct vm_ar
mutex_unlock(&rb->aux_mutex);
}
- if (atomic_dec_and_test(&rb->mmap_count))
+ if (refcount_dec_and_test(&rb->mmap_count))
detach_rest = true;
- if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
+ if (!refcount_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
goto out_put;
ring_buffer_attach(event, NULL);
@@ -6992,19 +6992,19 @@ static int perf_mmap_rb(struct vm_area_s
if (data_page_nr(event->rb) != nr_pages)
return -EINVAL;
- if (atomic_inc_not_zero(&event->rb->mmap_count)) {
+ if (refcount_inc_not_zero(&event->rb->mmap_count)) {
/*
* Success -- managed to mmap() the same buffer
* multiple times.
*/
perf_mmap_account(vma, user_extra, extra);
- atomic_inc(&event->mmap_count);
+ refcount_inc(&event->mmap_count);
return 0;
}
/*
* Raced against perf_mmap_close()'s
- * atomic_dec_and_mutex_lock() remove the
+ * refcount_dec_and_mutex_lock() remove the
* event and continue as if !event->rb
*/
ring_buffer_attach(event, NULL);
@@ -7023,7 +7023,7 @@ static int perf_mmap_rb(struct vm_area_s
if (!rb)
return -ENOMEM;
- atomic_set(&rb->mmap_count, 1);
+ refcount_set(&rb->mmap_count, 1);
rb->mmap_user = get_current_user();
rb->mmap_locked = extra;
@@ -7034,7 +7034,7 @@ static int perf_mmap_rb(struct vm_area_s
perf_event_update_userpage(event);
perf_mmap_account(vma, user_extra, extra);
- atomic_set(&event->mmap_count, 1);
+ refcount_set(&event->mmap_count, 1);
return 0;
}
@@ -7081,15 +7081,15 @@ static int perf_mmap_aux(struct vm_area_
if (!is_power_of_2(nr_pages))
return -EINVAL;
- if (!atomic_inc_not_zero(&rb->mmap_count))
+ if (!refcount_inc_not_zero(&rb->mmap_count))
return -EINVAL;
if (rb_has_aux(rb)) {
- atomic_inc(&rb->aux_mmap_count);
+ refcount_inc(&rb->aux_mmap_count);
} else {
if (!perf_mmap_calc_limits(vma, &user_extra, &extra)) {
- atomic_dec(&rb->mmap_count);
+ refcount_dec(&rb->mmap_count);
return -EPERM;
}
@@ -7101,16 +7101,16 @@ static int perf_mmap_aux(struct vm_area_
ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
event->attr.aux_watermark, rb_flags);
if (ret) {
- atomic_dec(&rb->mmap_count);
+ refcount_dec(&rb->mmap_count);
return ret;
}
- atomic_set(&rb->aux_mmap_count, 1);
+ refcount_set(&rb->aux_mmap_count, 1);
rb->aux_mmap_locked = extra;
}
perf_mmap_account(vma, user_extra, extra);
- atomic_inc(&event->mmap_count);
+ refcount_inc(&event->mmap_count);
return 0;
}
@@ -13257,7 +13257,7 @@ perf_event_set_output(struct perf_event
mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex);
set:
/* Can't redirect output if we've got an active mmap() */
- if (atomic_read(&event->mmap_count))
+ if (refcount_read(&event->mmap_count))
goto unlock;
if (output_event) {
@@ -13270,7 +13270,7 @@ perf_event_set_output(struct perf_event
goto unlock;
/* did we race against perf_mmap_close() */
- if (!atomic_read(&rb->mmap_count)) {
+ if (!refcount_read(&rb->mmap_count)) {
ring_buffer_put(rb);
goto unlock;
}
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -35,7 +35,7 @@ struct perf_buffer {
spinlock_t event_lock;
struct list_head event_list;
- atomic_t mmap_count;
+ refcount_t mmap_count;
unsigned long mmap_locked;
struct user_struct *mmap_user;
@@ -47,7 +47,7 @@ struct perf_buffer {
unsigned long aux_pgoff;
int aux_nr_pages;
int aux_overwrite;
- atomic_t aux_mmap_count;
+ refcount_t aux_mmap_count;
unsigned long aux_mmap_locked;
void (*free_aux)(void *);
refcount_t aux_refcount;
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -400,7 +400,7 @@ void *perf_aux_output_begin(struct perf_
* the same order, see perf_mmap_close. Otherwise we end up freeing
* aux pages in this path, which is a bug, because in_atomic().
*/
- if (!atomic_read(&rb->aux_mmap_count))
+ if (!refcount_read(&rb->aux_mmap_count))
goto err;
if (!refcount_inc_not_zero(&rb->aux_refcount))