Message-ID: <aabd9668-bd22-44de-a4ef-e3bff9c48c28@lucifer.local>
Date: Wed, 13 Aug 2025 08:09:54 +0100
From: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
To: Peter Zijlstra <peterz@...radead.org>
Cc: tglx@...utronix.de, linux-kernel@...r.kernel.org,
        torvalds@...uxfoundation.org, mingo@...nel.org, namhyung@...nel.org,
        acme@...hat.com, kees@...nel.org
Subject: Re: [PATCH v3 15/15] perf: Convert mmap() refcounts to refcount_t

On Tue, Aug 12, 2025 at 12:39:13PM +0200, Peter Zijlstra wrote:
> From: Thomas Gleixner <tglx@...utronix.de>
>
> The recently fixed reference count leaks could have been detected by using
> refcount_t and refcount_t would have mitigated the potential overflow at
> least.
>
> Now that the code is properly structured, convert the mmap() related
> mmap_count variants over to refcount_t.
>
> No functional change intended.
>
> Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>

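A note on the "mitigated the potential overflow" point in the changelog
above: refcount_t saturates rather than wrapping on overflow, so a leaked
increment degrades into a warning and a pinned (leaked) object instead of a
counter that wraps through zero and frees memory that is still mapped. A
minimal userspace model of that behaviour (not the kernel implementation,
which is atomic; the saturation point below is this sketch's choice, not
the kernel's exact constant):

	#include <limits.h>
	#include <stdio.h>

	typedef struct { unsigned int val; } refcount_t;

	/* Sketch-only saturation point; the kernel pins at its own constant. */
	#define REFCOUNT_SATURATED	(UINT_MAX / 2)

	static void refcount_inc(refcount_t *r)
	{
		if (r->val >= REFCOUNT_SATURATED) {
			/* Pin the counter: never wrap back through zero. */
			r->val = REFCOUNT_SATURATED;
			fprintf(stderr, "refcount_t: saturated, leaking instead of wrapping\n");
			return;
		}
		r->val++;
	}

	int main(void)
	{
		refcount_t rc = { REFCOUNT_SATURATED - 1 };

		refcount_inc(&rc);	/* hits the saturation point */
		refcount_inc(&rc);	/* stays pinned: no wrap, no use-after-free */
		printf("val = %u\n", rc.val);
		return 0;
	}

An atomic_t in the same position would silently wrap to 0 once incremented
past UINT_MAX, at which point the next "last" unmap frees a buffer that
other mappings still reference.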
All LGTM. I checked that the mmap test passes with no splat; the earlier
atomic_inc() -> atomic_set() change on &rb->mmap_count (which here becomes
refcount_set()) sorted that out.
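
To spell out why that conversion mattered: refcount_inc() treats an
increment from zero as a bug (the object is either dead or was never
initialised), so the first reference on a freshly allocated buffer has to
be established with refcount_set(..., 1), which is what the hunks in
perf_mmap_rb() and perf_mmap_aux() below do. A minimal userspace model of
the rule (again not the kernel implementation, and not thread-safe):

	#include <stdbool.h>
	#include <stdio.h>

	typedef struct { unsigned int val; } refcount_t;

	static void refcount_set(refcount_t *r, unsigned int n) { r->val = n; }

	static bool refcount_inc_not_zero(refcount_t *r)
	{
		if (r->val == 0)
			return false;	/* object already dead: refuse to resurrect it */
		r->val++;
		return true;
	}

	static void refcount_inc(refcount_t *r)
	{
		if (!refcount_inc_not_zero(r))
			fprintf(stderr, "refcount_t: inc on 0; use-after-free?\n");
	}

	int main(void)
	{
		refcount_t mmap_count = { 0 };	/* freshly allocated buffer */

		refcount_inc(&mmap_count);	/* wrong: the kernel would WARN here */
		refcount_set(&mmap_count, 1);	/* right: establish the first reference */
		printf("mmap_count = %u\n", mmap_count.val);
		return 0;
	}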

This is a nice fixup, as is the rest of the series. Kudos to tglx, and to
you as well, Peter, for adding further nice changes and fixing up the bit
we missed.

This is a _massive_ improvement :)

Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>

> Link: https://lkml.kernel.org/r/20250811070620.716309215@linutronix.de
> ---
>  include/linux/perf_event.h  |    2 +-
>  kernel/events/core.c        |   40 ++++++++++++++++++++--------------------
>  kernel/events/internal.h    |    4 ++--
>  kernel/events/ring_buffer.c |    2 +-
>  4 files changed, 24 insertions(+), 24 deletions(-)
>
> --- a/include/linux/perf_event.h
> +++ b/include/linux/perf_event.h
> @@ -859,7 +859,7 @@ struct perf_event {
>
>  	/* mmap bits */
>  	struct mutex			mmap_mutex;
> -	atomic_t			mmap_count;
> +	refcount_t			mmap_count;
>
>  	struct perf_buffer		*rb;
>  	struct list_head		rb_entry;
> --- a/kernel/events/core.c
> +++ b/kernel/events/core.c
> @@ -3968,7 +3968,7 @@ static noinline int visit_groups_merge(s
>   */
>  static inline bool event_update_userpage(struct perf_event *event)
>  {
> -	if (likely(!atomic_read(&event->mmap_count)))
> +	if (likely(!refcount_read(&event->mmap_count)))
>  		return false;
>
>  	perf_event_update_time(event);
> @@ -6704,11 +6704,11 @@ static void perf_mmap_open(struct vm_are
>  	struct perf_event *event = vma->vm_file->private_data;
>  	mapped_f mapped = get_mapped(event, event_mapped);
>
> -	atomic_inc(&event->mmap_count);
> -	atomic_inc(&event->rb->mmap_count);
> +	refcount_inc(&event->mmap_count);
> +	refcount_inc(&event->rb->mmap_count);
>
>  	if (vma->vm_pgoff)
> -		atomic_inc(&event->rb->aux_mmap_count);
> +		refcount_inc(&event->rb->aux_mmap_count);
>
>  	if (mapped)
>  		mapped(event, vma->vm_mm);
> @@ -6743,7 +6743,7 @@ static void perf_mmap_close(struct vm_ar
>  	 * to avoid complications.
>  	 */
>  	if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
> -	    atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &rb->aux_mutex)) {
> +	    refcount_dec_and_mutex_lock(&rb->aux_mmap_count, &rb->aux_mutex)) {
>  		/*
>  		 * Stop all AUX events that are writing to this buffer,
>  		 * so that we can free its AUX pages and corresponding PMU
> @@ -6763,10 +6763,10 @@ static void perf_mmap_close(struct vm_ar
>  		mutex_unlock(&rb->aux_mutex);
>  	}
>
> -	if (atomic_dec_and_test(&rb->mmap_count))
> +	if (refcount_dec_and_test(&rb->mmap_count))
>  		detach_rest = true;
>
> -	if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
> +	if (!refcount_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
>  		goto out_put;
>
>  	ring_buffer_attach(event, NULL);
> @@ -6992,19 +6992,19 @@ static int perf_mmap_rb(struct vm_area_s
>  		if (data_page_nr(event->rb) != nr_pages)
>  			return -EINVAL;
>
> -		if (atomic_inc_not_zero(&event->rb->mmap_count)) {
> +		if (refcount_inc_not_zero(&event->rb->mmap_count)) {
>  			/*
>  			 * Success -- managed to mmap() the same buffer
>  			 * multiple times.
>  			 */
>  			perf_mmap_account(vma, user_extra, extra);
> -			atomic_inc(&event->mmap_count);
> +			refcount_inc(&event->mmap_count);
>  			return 0;
>  		}
>
>  		/*
>  		 * Raced against perf_mmap_close()'s
> -		 * atomic_dec_and_mutex_lock() remove the
> +		 * refcount_dec_and_mutex_lock() remove the
>  		 * event and continue as if !event->rb
>  		 */
>  		ring_buffer_attach(event, NULL);
> @@ -7023,7 +7023,7 @@ static int perf_mmap_rb(struct vm_area_s
>  	if (!rb)
>  		return -ENOMEM;
>
> -	atomic_set(&rb->mmap_count, 1);
> +	refcount_set(&rb->mmap_count, 1);
>  	rb->mmap_user = get_current_user();
>  	rb->mmap_locked = extra;
>
> @@ -7034,7 +7034,7 @@ static int perf_mmap_rb(struct vm_area_s
>  	perf_event_update_userpage(event);
>
>  	perf_mmap_account(vma, user_extra, extra);
> -	atomic_set(&event->mmap_count, 1);
> +	refcount_set(&event->mmap_count, 1);
>
>  	return 0;
>  }
> @@ -7081,15 +7081,15 @@ static int perf_mmap_aux(struct vm_area_
>  	if (!is_power_of_2(nr_pages))
>  		return -EINVAL;
>
> -	if (!atomic_inc_not_zero(&rb->mmap_count))
> +	if (!refcount_inc_not_zero(&rb->mmap_count))
>  		return -EINVAL;
>
>  	if (rb_has_aux(rb)) {
> -		atomic_inc(&rb->aux_mmap_count);
> +		refcount_inc(&rb->aux_mmap_count);
>
>  	} else {
>  		if (!perf_mmap_calc_limits(vma, &user_extra, &extra)) {
> -			atomic_dec(&rb->mmap_count);
> +			refcount_dec(&rb->mmap_count);
>  			return -EPERM;
>  		}
>
> @@ -7101,16 +7101,16 @@ static int perf_mmap_aux(struct vm_area_
>  		ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
>  				   event->attr.aux_watermark, rb_flags);
>  		if (ret) {
> -			atomic_dec(&rb->mmap_count);
> +			refcount_dec(&rb->mmap_count);
>  			return ret;
>  		}
>
> -		atomic_set(&rb->aux_mmap_count, 1);
> +		refcount_set(&rb->aux_mmap_count, 1);
>  		rb->aux_mmap_locked = extra;
>  	}
>
>  	perf_mmap_account(vma, user_extra, extra);
> -	atomic_inc(&event->mmap_count);
> +	refcount_inc(&event->mmap_count);
>
>  	return 0;
>  }
> @@ -13257,7 +13257,7 @@ perf_event_set_output(struct perf_event
>  	mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex);
>  set:
>  	/* Can't redirect output if we've got an active mmap() */
> -	if (atomic_read(&event->mmap_count))
> +	if (refcount_read(&event->mmap_count))
>  		goto unlock;
>
>  	if (output_event) {
> @@ -13270,7 +13270,7 @@ perf_event_set_output(struct perf_event
>  			goto unlock;
>
>  		/* did we race against perf_mmap_close() */
> -		if (!atomic_read(&rb->mmap_count)) {
> +		if (!refcount_read(&rb->mmap_count)) {
>  			ring_buffer_put(rb);
>  			goto unlock;
>  		}
> --- a/kernel/events/internal.h
> +++ b/kernel/events/internal.h
> @@ -35,7 +35,7 @@ struct perf_buffer {
>  	spinlock_t			event_lock;
>  	struct list_head		event_list;
>
> -	atomic_t			mmap_count;
> +	refcount_t			mmap_count;
>  	unsigned long			mmap_locked;
>  	struct user_struct		*mmap_user;
>
> @@ -47,7 +47,7 @@ struct perf_buffer {
>  	unsigned long			aux_pgoff;
>  	int				aux_nr_pages;
>  	int				aux_overwrite;
> -	atomic_t			aux_mmap_count;
> +	refcount_t			aux_mmap_count;
>  	unsigned long			aux_mmap_locked;
>  	void				(*free_aux)(void *);
>  	refcount_t			aux_refcount;
> --- a/kernel/events/ring_buffer.c
> +++ b/kernel/events/ring_buffer.c
> @@ -400,7 +400,7 @@ void *perf_aux_output_begin(struct perf_
>  	 * the same order, see perf_mmap_close. Otherwise we end up freeing
>  	 * aux pages in this path, which is a bug, because in_atomic().
>  	 */
> -	if (!atomic_read(&rb->aux_mmap_count))
> +	if (!refcount_read(&rb->aux_mmap_count))
>  		goto err;
>
>  	if (!refcount_inc_not_zero(&rb->aux_refcount))
>
>

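For anyone skimming the diff who has not met the dec-and-lock helpers:
refcount_dec_and_mutex_lock() drops a reference and, only when that was the
last one, returns true with the mutex held so the caller can tear the
object down under the lock (the pattern perf_mmap_close() relies on above).
A rough userspace model of that contract (the real helper is atomic; the
pthread mutex and the non-atomic counter here are sketch-only
simplifications):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	typedef struct { unsigned int val; } refcount_t;

	static bool refcount_dec_and_mutex_lock(refcount_t *r, pthread_mutex_t *lock)
	{
		/* Fast path: other references remain, never touch the lock. */
		if (r->val > 1) {
			r->val--;
			return false;
		}
		/* Possibly the last reference: recheck and drop under the lock. */
		pthread_mutex_lock(lock);
		if (--r->val != 0) {
			pthread_mutex_unlock(lock);
			return false;
		}
		return true;	/* count hit zero; caller tears down, then unlocks */
	}

	int main(void)
	{
		pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
		refcount_t mmap_count = { 2 };

		if (!refcount_dec_and_mutex_lock(&mmap_count, &lock))
			printf("still mapped elsewhere, nothing to tear down\n");

		if (refcount_dec_and_mutex_lock(&mmap_count, &lock)) {
			printf("last unmap: tear down under the lock\n");
			pthread_mutex_unlock(&lock);
		}
		return 0;
	}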