Message-ID: <25957ac5-e52f-493d-bc32-395a759a41e2@lucifer.local>
Date: Thu, 7 Aug 2025 15:14:41 +0100
From: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
To: Thomas Gleixner <tglx@...utronix.de>
Cc: LKML <linux-kernel@...r.kernel.org>,
Linus Torvalds <torvalds@...uxfoundation.org>,
Peter Zijlstra <peterz@...radead.org>, Ingo Molnar <mingo@...nel.org>,
Namhyung Kim <namhyung@...nel.org>,
Arnaldo Carvalho de Melo <acme@...hat.com>,
Kees Cook <kees@...nel.org>
Subject: Re: [patch 2/6] perf/core: Split out mlock limit handling
On Wed, Aug 06, 2025 at 10:12:55PM +0200, Thomas Gleixner wrote:
> To prepare for splitting the buffer allocation out into separate functions
> for the ring buffer and the AUX buffer, split out mlock limit handling into
> a helper function, which can be called from both.
>
> No functional change intended.
>
> Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
LGTM, so:
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@...cle.com>
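
(To check my understanding of where this is heading: with the bool return,
both of the soon-to-be-split allocation paths can presumably gate on the
helper in the same way, roughly like the sketch below. The function name
perf_mmap_rb() and the exact page accounting are my own placeholders, not
taken from this series:

	static int perf_mmap_rb(struct perf_event *event,
				struct vm_area_struct *vma,
				unsigned long nr_pages)
	{
		/* Data pages plus the user control page. */
		long user_extra = nr_pages + 1, extra = 0;

		if (!perf_mmap_calc_limits(vma, &user_extra, &extra))
			return -EPERM;

		/*
		 * ... allocate the ring buffer, then charge user_extra
		 * against user->locked_vm and extra against
		 * vma->vm_mm->pinned_vm ...
		 */
		return 0;
	}

The details obviously belong to the later patches; the move in this one is
mechanical.)
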
> ---
> kernel/events/core.c | 75 +++++++++++++++++++++++++--------------------------
> 1 file changed, 38 insertions(+), 37 deletions(-)
>
> --- a/kernel/events/core.c
> +++ b/kernel/events/core.c
> @@ -6927,17 +6927,49 @@ static int map_range(struct perf_buffer
> return err;
> }
>
> +static bool perf_mmap_calc_limits(struct vm_area_struct *vma, long *user_extra, long *extra)
> +{
> + unsigned long user_locked, user_lock_limit, locked, lock_limit;
> + struct user_struct *user = current_user();
> +
> + user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
> + /* Increase the limit linearly with more CPUs */
> + user_lock_limit *= num_online_cpus();
> +
> + user_locked = atomic_long_read(&user->locked_vm);
> +
> + /*
> + * sysctl_perf_event_mlock may have changed, so that
> + * user->locked_vm > user_lock_limit
> + */
> + if (user_locked > user_lock_limit)
> + user_locked = user_lock_limit;
> + user_locked += *user_extra;
> +
> + if (user_locked > user_lock_limit) {
> + /*
> + * charge locked_vm until it hits user_lock_limit;
> + * charge the rest from pinned_vm
> + */
> + *extra = user_locked - user_lock_limit;
> + *user_extra -= *extra;
> + }
> +
> + lock_limit = rlimit(RLIMIT_MEMLOCK);
> + lock_limit >>= PAGE_SHIFT;
> + locked = atomic64_read(&vma->vm_mm->pinned_vm) + *extra;
> +
> + return locked <= lock_limit || !perf_is_paranoid() || capable(CAP_IPC_LOCK);
> +}
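
Total aside, nothing to do with this patch: it took me a second to recall
that sysctl_perf_event_mlock is in KiB, so the >> (PAGE_SHIFT - 10) above
is just the KiB -> pages conversion. With 4K pages (PAGE_SHIFT == 12) and
the default of 516 KiB (512 KiB plus one page) that works out to
516 >> 2 = 129 pages, which is then scaled by num_online_cpus().
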
> +
> static int perf_mmap(struct file *file, struct vm_area_struct *vma)
> {
> struct perf_event *event = file->private_data;
> - unsigned long user_locked, user_lock_limit;
> struct user_struct *user = current_user();
> + unsigned long vma_size, nr_pages;
> + long user_extra = 0, extra = 0;
> struct mutex *aux_mutex = NULL;
> struct perf_buffer *rb = NULL;
> - unsigned long locked, lock_limit;
> - unsigned long vma_size;
> - unsigned long nr_pages;
> - long user_extra = 0, extra = 0;
> int ret, flags = 0;
> mapped_f mapped;
>
> @@ -7063,38 +7095,7 @@ static int perf_mmap(struct file *file,
> }
> }
>
> - user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
> -
> - /*
> - * Increase the limit linearly with more CPUs:
> - */
> - user_lock_limit *= num_online_cpus();
> -
> - user_locked = atomic_long_read(&user->locked_vm);
> -
> - /*
> - * sysctl_perf_event_mlock may have changed, so that
> - * user->locked_vm > user_lock_limit
> - */
> - if (user_locked > user_lock_limit)
> - user_locked = user_lock_limit;
> - user_locked += user_extra;
> -
> - if (user_locked > user_lock_limit) {
> - /*
> - * charge locked_vm until it hits user_lock_limit;
> - * charge the rest from pinned_vm
> - */
> - extra = user_locked - user_lock_limit;
> - user_extra -= extra;
> - }
> -
> - lock_limit = rlimit(RLIMIT_MEMLOCK);
> - lock_limit >>= PAGE_SHIFT;
> - locked = atomic64_read(&vma->vm_mm->pinned_vm) + extra;
> -
> - if ((locked > lock_limit) && perf_is_paranoid() &&
> - !capable(CAP_IPC_LOCK)) {
> + if (!perf_mmap_calc_limits(vma, &user_extra, &extra)) {
> ret = -EPERM;
> goto unlock;
> }
>
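
For completeness I double-checked the inverted condition: the old bail-out
fired on

	(locked > lock_limit) && perf_is_paranoid() && !capable(CAP_IPC_LOCK)

and the helper now returns the negation,

	locked <= lock_limit || !perf_is_paranoid() || capable(CAP_IPC_LOCK)

which is a straight De Morgan transformation, so behaviour is preserved as
advertised.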