Message-ID: <CAM9d7ciBh2DhH7wEVFfXucxtvaog_BBVkuYcNC4vX_XQDx-QCw@mail.gmail.com>
Date:   Thu, 18 Aug 2022 09:41:44 -0700
From:   Namhyung Kim <namhyung@...nel.org>
To:     Ian Rogers <irogers@...gle.com>
Cc:     Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...hat.com>,
        Arnaldo Carvalho de Melo <acme@...nel.org>,
        Mark Rutland <mark.rutland@....com>,
        Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
        Jiri Olsa <jolsa@...nel.org>,
        Thomas Gleixner <tglx@...utronix.de>,
        Darren Hart <dvhart@...radead.org>,
        Davidlohr Bueso <dave@...olabs.net>,
        André Almeida <andrealmeid@...lia.com>,
        Nathan Chancellor <nathan@...nel.org>,
        Nick Desaulniers <ndesaulniers@...gle.com>,
        Tom Rix <trix@...hat.com>,
        Athira Rajeev <atrajeev@...ux.vnet.ibm.com>,
        Weiguo Li <liwg06@...mail.com>,
        Pavithra Gurushankar <gpavithrasha@...il.com>,
        Thomas Richter <tmricht@...ux.ibm.com>,
        Ravi Bangoria <ravi.bangoria@....com>,
        Dario Petrillo <dario.pk1@...il.com>,
        Wenyu Liu <liuwenyu7@...wei.com>,
        Hewenliang <hewenliang4@...wei.com>,
        yaowenbin <yaowenbin1@...wei.com>,
        Dave Marchevsky <davemarchevsky@...com>,
        Andrii Nakryiko <andrii@...nel.org>,
        Alexandre Truong <alexandre.truong@....com>,
        Kim Phillips <kim.phillips@....com>,
        Leo Yan <leo.yan@...aro.org>,
        Quentin Monnet <quentin@...valent.com>,
        William Cohen <wcohen@...hat.com>,
        Andres Freund <andres@...razel.de>,
        Song Liu <songliubraving@...com>,
        Adrian Hunter <adrian.hunter@...el.com>,
        Martin Liška <mliska@...e.cz>,
        Colin Ian King <colin.king@...el.com>,
        James Clark <james.clark@....com>,
        Fangrui Song <maskray@...gle.com>,
        Stephane Eranian <eranian@...gle.com>,
        Kajol Jain <kjain@...ux.ibm.com>,
        Andi Kleen <ak@...ux.intel.com>,
        Alexey Bayduraev <alexey.v.bayduraev@...ux.intel.com>,
        Riccardo Mancini <rickyman7@...il.com>,
        Masami Hiramatsu <mhiramat@...nel.org>,
        Christophe JAILLET <christophe.jaillet@...adoo.fr>,
        Zechuan Chen <chenzechuan1@...wei.com>,
        Jason Wang <wangborong@...rlc.com>,
        Lexi Shao <shaolexi@...wei.com>,
        Remi Bernon <rbernon@...eweavers.com>,
        linux-kernel <linux-kernel@...r.kernel.org>,
        linux-perf-users <linux-perf-users@...r.kernel.org>,
        llvm@...ts.linux.dev
Subject: Re: [PATCH v1 5/6] perf mutex: Fix thread safety analysis

On Tue, Aug 16, 2022 at 10:39 PM Ian Rogers <irogers@...gle.com> wrote:
>
> Add annotations to describe lock behavior. Add missing unlocks to
> perf_sched__replay. Alter hist_iter__top_callback as the thread-safety
> analysis cannot follow pointers through local variables.
>
> Signed-off-by: Ian Rogers <irogers@...gle.com>
> ---
>  tools/perf/builtin-sched.c | 8 ++++++++
>  tools/perf/builtin-top.c   | 5 +++--
>  2 files changed, 11 insertions(+), 2 deletions(-)
>
> diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
> index 0f52f73be896..a8a765ed28ce 100644
> --- a/tools/perf/builtin-sched.c
> +++ b/tools/perf/builtin-sched.c
> @@ -658,6 +658,8 @@ static void *thread_func(void *ctx)
>  }
>
>  static void create_tasks(struct perf_sched *sched)
> +       EXCLUSIVE_LOCK_FUNCTION(sched->start_work_mutex)
> +       EXCLUSIVE_LOCK_FUNCTION(sched->work_done_wait_mutex)
>  {
>         struct task_desc *task;
>         pthread_attr_t attr;
> @@ -687,6 +689,8 @@ static void create_tasks(struct perf_sched *sched)
>  }
>
>  static void wait_for_tasks(struct perf_sched *sched)
> +       EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex)
> +       EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex)
>  {
>         u64 cpu_usage_0, cpu_usage_1;
>         struct task_desc *task;
> @@ -738,6 +742,8 @@ static void wait_for_tasks(struct perf_sched *sched)
>  }
>
>  static void run_one_test(struct perf_sched *sched)
> +       EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex)
> +       EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex)
>  {
>         u64 T0, T1, delta, avg_delta, fluct;
>
> @@ -3314,6 +3320,8 @@ static int perf_sched__replay(struct perf_sched *sched)
>         for (i = 0; i < sched->replay_repeat; i++)
>                 run_one_test(sched);
>
> +       mutex_unlock(&sched->start_work_mutex);
> +       mutex_unlock(&sched->work_done_wait_mutex);

But this would wake up the replay tasks and let them burn CPUs unnecessarily.
Maybe we can make them exit at that point instead.
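
Something along these lines is what I have in mind (a rough, untested
sketch for builtin-sched.c; the destroy_tasks() name and the
thread_funcs_exit flag are made up, and thread_func() would need a
matching check of the flag in its loop before waiting for more work):

/*
 * Sketch only: tell the worker threads to exit, release the mutexes so
 * they can observe the flag, then reap them.
 */
static void destroy_tasks(struct perf_sched *sched)
{
	unsigned long i;

	sched->thread_funcs_exit = true;	/* hypothetical new field */

	mutex_unlock(&sched->start_work_mutex);
	mutex_unlock(&sched->work_done_wait_mutex);

	for (i = 0; i < sched->nr_tasks; i++)
		pthread_join(sched->tasks[i]->thread, NULL);
}

Then perf_sched__replay() could call destroy_tasks() here instead of
open-coding the two unlocks.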


>         return 0;
>  }
>
> diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
> index 3757292bfe86..e832f04e3076 100644
> --- a/tools/perf/builtin-top.c
> +++ b/tools/perf/builtin-top.c
> @@ -196,6 +196,7 @@ static void perf_top__record_precise_ip(struct perf_top *top,
>                                         struct hist_entry *he,
>                                         struct perf_sample *sample,
>                                         struct evsel *evsel, u64 ip)
> +       EXCLUSIVE_LOCKS_REQUIRED(he->hists->lock)
>  {
>         struct annotation *notes;
>         struct symbol *sym = he->ms.sym;
> @@ -724,13 +725,13 @@ static void *display_thread(void *arg)
>  static int hist_iter__top_callback(struct hist_entry_iter *iter,
>                                    struct addr_location *al, bool single,
>                                    void *arg)
> +       EXCLUSIVE_LOCKS_REQUIRED(iter->he->hists->lock)
>  {
>         struct perf_top *top = arg;
> -       struct hist_entry *he = iter->he;
>         struct evsel *evsel = iter->evsel;
>
>         if (perf_hpp_list.sym && single)
> -               perf_top__record_precise_ip(top, he, iter->sample, evsel, al->addr);
> +               perf_top__record_precise_ip(top, iter->he, iter->sample, evsel, al->addr);
>
>         hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
>                      !(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY),

Looks like a separate change.
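
For anyone curious, below is a stripped-down illustration of the analysis
limitation the commit message mentions (compile with clang -c -Wthread-safety;
the struct and function names are stand-ins, and the attribute spellings are
the generic clang ones rather than whatever tools/perf/util/mutex.h defines):

#define CAPABILITY(x) __attribute__((capability(x)))
#define EXCLUSIVE_LOCKS_REQUIRED(...) \
	__attribute__((exclusive_locks_required(__VA_ARGS__)))

struct CAPABILITY("mutex") mutex { int dummy; };

struct hists      { struct mutex lock; };
struct hist_entry { struct hists *hists; };

static void record_ip(struct hist_entry *he)
	EXCLUSIVE_LOCKS_REQUIRED(he->hists->lock)
{
	(void)he;
}

static void callback(struct hist_entry *iter_he)
	EXCLUSIVE_LOCKS_REQUIRED(iter_he->hists->lock)
{
	struct hist_entry *he = iter_he;

	record_ip(he);		/* warns: the checker does not follow the
				 * alias, so he->hists->lock is treated as
				 * a different lock */
	record_ip(iter_he);	/* ok: matches the annotation expression */
}

So passing iter->he directly is what lets the annotations line up, but it
could indeed be split out as its own patch.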

Thanks,
Namhyung


> --
> 2.37.1.595.g718a3a8f04-goog
>
