Message-ID: <CAP-5=fVjjSENsdAt+QyBHJ3-yjdwHSLMgZZ_z80YtRQMZvjbiw@mail.gmail.com>
Date: Wed, 2 Jul 2025 20:10:44 -0700
From: Ian Rogers <irogers@...gle.com>
To: Namhyung Kim <namhyung@...nel.org>
Cc: Arnaldo Carvalho de Melo <acme@...nel.org>, Kan Liang <kan.liang@...ux.intel.com>, 
	Jiri Olsa <jolsa@...nel.org>, Adrian Hunter <adrian.hunter@...el.com>, 
	Peter Zijlstra <peterz@...radead.org>, Ingo Molnar <mingo@...nel.org>, 
	LKML <linux-kernel@...r.kernel.org>, linux-perf-users@...r.kernel.org
Subject: Re: [PATCH 7/8] perf sched: Fix memory leaks in 'perf sched latency'

On Wed, Jul 2, 2025 at 6:49 PM Namhyung Kim <namhyung@...nel.org> wrote:
>
> The work_atoms should be freed after use.  Add free_work_atoms() to
> make sure all of them are released.  Use list_splice_init() when merging
> atoms so the source list is not left pointing at invalid entries.
>
> Signed-off-by: Namhyung Kim <namhyung@...nel.org>

Reviewed-by: Ian Rogers <irogers@...gle.com>
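
For context, a minimal sketch (not part of the patch) of why the
list_splice_init() change matters, assuming the tools/include/linux/list.h
API that perf builds against:

    #include <linux/list.h>

    struct work_atom { struct list_head list; };

    static void merge_lists(struct list_head *dst, struct list_head *src)
    {
            /* list_splice(src, dst) moves the entries onto *dst but leaves
             * *src still pointing at them; a later
             * list_for_each_entry_safe() over *src (as free_work_atoms()
             * does) would walk and free entries now owned by *dst.
             * list_splice_init() additionally re-initializes *src, so the
             * source list is empty afterwards and safe to free. */
            list_splice_init(src, dst);
    }

With that, free_work_atoms(data) after the merge only drops the (now
empty) source container rather than double-freeing the atoms.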

Thanks,
Ian

> ---
>  tools/perf/builtin-sched.c | 27 ++++++++++++++++++++++++---
>  1 file changed, 24 insertions(+), 3 deletions(-)
>
> diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
> index 087d4eaba5f7160d..4bbebd6ef2e4a791 100644
> --- a/tools/perf/builtin-sched.c
> +++ b/tools/perf/builtin-sched.c
> @@ -1111,6 +1111,21 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
>         atoms->nb_atoms++;
>  }
>
> +static void free_work_atoms(struct work_atoms *atoms)
> +{
> +       struct work_atom *atom, *tmp;
> +
> +       if (atoms == NULL)
> +               return;
> +
> +       list_for_each_entry_safe(atom, tmp, &atoms->work_list, list) {
> +               list_del(&atom->list);
> +               free(atom);
> +       }
> +       thread__zput(atoms->thread);
> +       free(atoms);
> +}
> +
>  static int latency_switch_event(struct perf_sched *sched,
>                                 struct evsel *evsel,
>                                 struct perf_sample *sample,
> @@ -3426,13 +3441,13 @@ static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *d
>                         this->total_runtime += data->total_runtime;
>                         this->nb_atoms += data->nb_atoms;
>                         this->total_lat += data->total_lat;
> -                       list_splice(&data->work_list, &this->work_list);
> +                       list_splice_init(&data->work_list, &this->work_list);
>                         if (this->max_lat < data->max_lat) {
>                                 this->max_lat = data->max_lat;
>                                 this->max_lat_start = data->max_lat_start;
>                                 this->max_lat_end = data->max_lat_end;
>                         }
> -                       zfree(&data);
> +                       free_work_atoms(data);
>                         return;
>                 }
>         }
> @@ -3511,7 +3526,6 @@ static int perf_sched__lat(struct perf_sched *sched)
>                 work_list = rb_entry(next, struct work_atoms, node);
>                 output_lat_thread(sched, work_list);
>                 next = rb_next(next);
> -               thread__zput(work_list->thread);
>         }
>
>         printf(" -----------------------------------------------------------------------------------------------------------------\n");
> @@ -3525,6 +3539,13 @@ static int perf_sched__lat(struct perf_sched *sched)
>
>         rc = 0;
>
> +       while ((next = rb_first_cached(&sched->sorted_atom_root))) {
> +               struct work_atoms *data;
> +
> +               data = rb_entry(next, struct work_atoms, node);
> +               rb_erase_cached(next, &sched->sorted_atom_root);
> +               free_work_atoms(data);
> +       }
>  out_free_cpus_switch_event:
>         free_cpus_switch_event(sched);
>         return rc;
> --
> 2.50.0.727.gbf7dc18ff4-goog
>