Message-ID: <254095d7-93f4-4994-b457-906579a066cd@lucifer.local>
Date: Wed, 13 Aug 2025 06:36:19 +0100
From: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
To: Peter Zijlstra <peterz@...radead.org>
Cc: tglx@...utronix.de, linux-kernel@...r.kernel.org,
        torvalds@...uxfoundation.org, mingo@...nel.org, namhyung@...nel.org,
        acme@...hat.com, kees@...nel.org
Subject: Re: [PATCH v3 02/15] perf: Split out mlock limit handling

On Tue, Aug 12, 2025 at 12:39:00PM +0200, Peter Zijlstra wrote:
> From: Thomas Gleixner <tglx@...utronix.de>
>
> To prepare for splitting the buffer allocation out into seperate functions

NIT: Same comment as on 1/2: seperate -> separate. Again, it doesn't hugely
matter, but just FYI!

> for the ring buffer and the AUX buffer, split out mlock limit handling into
> a helper function, which can be called from both.
>
> No functional change intended.
>
> Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
> Link: https://lkml.kernel.org/r/20250811070620.463634790@linutronix.de
> ---
>  kernel/events/core.c |   77 +++++++++++++++++++++++++--------------------------
>  1 file changed, 38 insertions(+), 39 deletions(-)
>
> --- a/kernel/events/core.c
> +++ b/kernel/events/core.c
> @@ -6927,17 +6927,49 @@ static int map_range(struct perf_buffer
>  	return err;
>  }
>
> +static bool perf_mmap_calc_limits(struct vm_area_struct *vma, long *user_extra, long *extra)
> +{
> +	unsigned long user_locked, user_lock_limit, locked, lock_limit;
> +	struct user_struct *user = current_user();
> +
> +	user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
> +	/* Increase the limit linearly with more CPUs */
> +	user_lock_limit *= num_online_cpus();
> +
> +	user_locked = atomic_long_read(&user->locked_vm);
> +
> +	/*
> +	 * sysctl_perf_event_mlock may have changed, so that
> +	 *     user->locked_vm > user_lock_limit
> +	 */
> +	if (user_locked > user_lock_limit)
> +		user_locked = user_lock_limit;
> +	user_locked += *user_extra;
> +
> +	if (user_locked > user_lock_limit) {
> +		/*
> +		 * charge locked_vm until it hits user_lock_limit;
> +		 * charge the rest from pinned_vm
> +		 */
> +		*extra = user_locked - user_lock_limit;
> +		*user_extra -= *extra;
> +	}
> +
> +	lock_limit = rlimit(RLIMIT_MEMLOCK);
> +	lock_limit >>= PAGE_SHIFT;
> +	locked = atomic64_read(&vma->vm_mm->pinned_vm) + *extra;
> +
> +	return locked <= lock_limit || !perf_is_paranoid() || capable(CAP_IPC_LOCK);
> +}
> +
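
One note for anyone reading along, now that the charging logic is nicely
isolated here: sysctl_perf_event_mlock is in KiB, so the >> (PAGE_SHIFT - 10)
converts KiB to pages before the per-CPU scaling. Below is a throwaway
userspace sketch of the charging split, not kernel code - every number in it
is made up (PAGE_SHIFT hard-coded to 12, 4 online CPUs, and IIRC the 516 KiB
sysctl default); it only illustrates how user_extra/extra end up charged
against locked_vm vs pinned_vm:

  #include <stdio.h>

  #define PAGE_SHIFT	12	/* assume 4K pages, just for the example */

  int main(void)
  {
  	/* default sysctl_perf_event_mlock is 512 + (PAGE_SIZE / 1024) KiB */
  	unsigned long sysctl_perf_event_mlock = 516;	/* KiB */
  	long user_lock_limit, user_locked;
  	long already_locked = 500;	/* pages already in user->locked_vm */
  	long user_extra = 64;		/* pages this mmap() wants to charge */
  	long extra = 0;			/* overflow charged to mm->pinned_vm */

  	/* KiB -> pages, then scale linearly with online CPUs (4 here) */
  	user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
  	user_lock_limit *= 4;

  	/* clamp in case the sysctl shrank under us, as the helper does */
  	user_locked = already_locked;
  	if (user_locked > user_lock_limit)
  		user_locked = user_lock_limit;
  	user_locked += user_extra;

  	if (user_locked > user_lock_limit) {
  		/* charge locked_vm up to the limit, the rest to pinned_vm */
  		extra = user_locked - user_lock_limit;
  		user_extra -= extra;
  	}

  	printf("locked_vm charge: %ld pages, pinned_vm charge: %ld pages\n",
  	       user_extra, extra);	/* 16 and 48 with these numbers */
  	return 0;
  }
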
>  static int perf_mmap(struct file *file, struct vm_area_struct *vma)
>  {
>  	struct perf_event *event = file->private_data;
> -	unsigned long user_locked, user_lock_limit;
>  	struct user_struct *user = current_user();
> +	unsigned long vma_size, nr_pages;
> +	long user_extra = 0, extra = 0;
>  	struct mutex *aux_mutex = NULL;
>  	struct perf_buffer *rb = NULL;
> -	unsigned long locked, lock_limit;
> -	unsigned long vma_size;
> -	unsigned long nr_pages;
> -	long user_extra = 0, extra = 0;
>  	int ret, flags = 0;
>  	mapped_f mapped;
>
> @@ -7063,38 +7093,7 @@ static int perf_mmap(struct file *file,
>  		}
>  	}
>
> -	user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
> -
> -	/*
> -	 * Increase the limit linearly with more CPUs:
> -	 */
> -	user_lock_limit *= num_online_cpus();
> -
> -	user_locked = atomic_long_read(&user->locked_vm);
> -
> -	/*
> -	 * sysctl_perf_event_mlock may have changed, so that
> -	 *     user->locked_vm > user_lock_limit
> -	 */
> -	if (user_locked > user_lock_limit)
> -		user_locked = user_lock_limit;
> -	user_locked += user_extra;
> -
> -	if (user_locked > user_lock_limit) {
> -		/*
> -		 * charge locked_vm until it hits user_lock_limit;
> -		 * charge the rest from pinned_vm
> -		 */
> -		extra = user_locked - user_lock_limit;
> -		user_extra -= extra;
> -	}
> -
> -	lock_limit = rlimit(RLIMIT_MEMLOCK);
> -	lock_limit >>= PAGE_SHIFT;
> -	locked = atomic64_read(&vma->vm_mm->pinned_vm) + extra;
> -
> -	if ((locked > lock_limit) && perf_is_paranoid() &&
> -		!capable(CAP_IPC_LOCK)) {
> +	if (!perf_mmap_calc_limits(vma, &user_extra, &extra)) {
>  		ret = -EPERM;
>  		goto unlock;
>  	}
>
>
