Message-ID: <Z79SgFChhckow6Jf@google.com>
Date: Wed, 26 Feb 2025 09:42:24 -0800
From: Namhyung Kim <namhyung@...nel.org>
To: Athira Rajeev <atrajeev@...ux.ibm.com>
Cc: Chun-Tse Shao <ctshao@...gle.com>,
"open list:PERFORMANCE EVENTS SUBSYSTEM" <linux-kernel@...r.kernel.org>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...nel.org>, Ian Rogers <irogers@...gle.com>,
Adrian Hunter <adrian.hunter@...el.com>,
Kan Liang <kan.liang@...ux.intel.com>, nick.forrington@....com,
"open list:PERFORMANCE EVENTS SUBSYSTEM" <linux-perf-users@...r.kernel.org>,
bpf@...r.kernel.org
Subject: Re: [PATCH v7 4/4] perf lock: Report owner stack in usermode

Hello,

On Wed, Feb 26, 2025 at 03:27:41PM +0530, Athira Rajeev wrote:
>
>
> > On 25 Feb 2025, at 12:12 AM, Chun-Tse Shao <ctshao@...gle.com> wrote:
> >
> > This patch parses `owner_lock_stat` into an RB tree, enabling ordered
> > reporting of owner lock statistics with stack traces. It also updates
> > the documentation for the `-o` option in contention mode, decouples `-o`
> > from `-t`, and issues a warning to inform users about the new behavior
> > of `-ov`.
> >
> > Example output:
> > $ sudo ~/linux/tools/perf/perf lock con -abvo -Y mutex-spin -E3 perf bench sched pipe
> > ...
> >  contended   total wait     max wait     avg wait         type   caller
> >
> >        171      1.55 ms     20.26 us      9.06 us        mutex   pipe_read+0x57
> >                         0xffffffffac6318e7  pipe_read+0x57
> >                         0xffffffffac623862  vfs_read+0x332
> >                         0xffffffffac62434b  ksys_read+0xbb
> >                         0xfffffffface604b2  do_syscall_64+0x82
> >                         0xffffffffad00012f  entry_SYSCALL_64_after_hwframe+0x76
> >         36    193.71 us     15.27 us      5.38 us        mutex   pipe_write+0x50
> >                         0xffffffffac631ee0  pipe_write+0x50
> >                         0xffffffffac6241db  vfs_write+0x3bb
> >                         0xffffffffac6244ab  ksys_write+0xbb
> >                         0xfffffffface604b2  do_syscall_64+0x82
> >                         0xffffffffad00012f  entry_SYSCALL_64_after_hwframe+0x76
> >          4     51.22 us     16.47 us     12.80 us        mutex   do_epoll_wait+0x24d
> >                         0xffffffffac691f0d  do_epoll_wait+0x24d
> >                         0xffffffffac69249b  do_epoll_pwait.part.0+0xb
> >                         0xffffffffac693ba5  __x64_sys_epoll_pwait+0x95
> >                         0xfffffffface604b2  do_syscall_64+0x82
> >                         0xffffffffad00012f  entry_SYSCALL_64_after_hwframe+0x76
> >
> > === owner stack trace ===
> >
> >          3     31.24 us     15.27 us     10.41 us        mutex   pipe_read+0x348
> >                         0xffffffffac631bd8  pipe_read+0x348
> >                         0xffffffffac623862  vfs_read+0x332
> >                         0xffffffffac62434b  ksys_read+0xbb
> >                         0xfffffffface604b2  do_syscall_64+0x82
> >                         0xffffffffad00012f  entry_SYSCALL_64_after_hwframe+0x76
> > ...
> >
> > Signed-off-by: Chun-Tse Shao <ctshao@...gle.com>
> > ---
> > tools/perf/Documentation/perf-lock.txt |  5 ++-
> > tools/perf/builtin-lock.c              | 22 +++++++++-
> > tools/perf/util/bpf_lock_contention.c  | 57 ++++++++++++++++++++++++++
> > tools/perf/util/lock-contention.h      |  7 ++++
> > 4 files changed, 87 insertions(+), 4 deletions(-)
> >
> > diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt
> > index d3793054f7d3..859dc11a7372 100644
> > --- a/tools/perf/Documentation/perf-lock.txt
> > +++ b/tools/perf/Documentation/perf-lock.txt
> > @@ -179,8 +179,9 @@ CONTENTION OPTIONS
> >
> >  -o::
> >  --lock-owner::
> > -	Show lock contention stat by owners. Implies --threads and
> > -	requires --use-bpf.
> > +	Show lock contention stat by owners. This option can be combined with -t,
> > +	which shows the owner's per-thread lock stats, or -v, which shows the
> > +	owner's stack trace. Requires --use-bpf.
> >
> >  -Y::
> >  --type-filter=<value>::
> > diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
> > index 9bebc186286f..05e7bc30488a 100644
> > --- a/tools/perf/builtin-lock.c
> > +++ b/tools/perf/builtin-lock.c
> > @@ -1817,6 +1817,22 @@ static void print_contention_result(struct lock_contention *con)
> >  		break;
> >  	}
> >
> > +	if (con->owner && con->save_callstack && verbose > 0) {
> > +		struct rb_root root = RB_ROOT;
> > +
> > +		if (symbol_conf.field_sep)
> > +			fprintf(lock_output, "# owner stack trace:\n");
> > +		else
> > +			fprintf(lock_output, "\n=== owner stack trace ===\n\n");
> > +		while ((st = pop_owner_stack_trace(con)))
> > +			insert_to(&root, st, compare);
> > +
> > +		while ((st = pop_from(&root))) {
> > +			print_lock_stat(con, st);
> > +			free(st);
> > +		}
> > +	}
> > +
> >  	if (print_nr_entries) {
> >  		/* update the total/bad stats */
> >  		while ((st = pop_from_result())) {
> > @@ -1962,8 +1978,10 @@ static int check_lock_contention_options(const struct option *options,
> >  		}
> >  	}
> >
> > -	if (show_lock_owner)
> > -		show_thread_stats = true;
> > +	if (show_lock_owner && !show_thread_stats) {
> > +		pr_warning("Now -o tries to show the owner's callstack instead of pid and comm.\n");
> > +		pr_warning("Please use the -t option too to keep the old behavior.\n");
> > +	}
> >
> >  	return 0;
> >  }
> > diff --git a/tools/perf/util/bpf_lock_contention.c b/tools/perf/util/bpf_lock_contention.c
> > index 76542b86e83f..16f4deba69ec 100644
> > --- a/tools/perf/util/bpf_lock_contention.c
> > +++ b/tools/perf/util/bpf_lock_contention.c
> > @@ -549,6 +549,63 @@ static const char *lock_contention_get_name(struct lock_contention *con,
> >  	return name_buf;
> >  }
> >
> > +struct lock_stat *pop_owner_stack_trace(struct lock_contention *con)
> > +{
> > +	int stacks_fd, stat_fd;
> > +	u64 *stack_trace = NULL;
> > +	s32 stack_id;
> > +	struct contention_key ckey = {};
> > +	struct contention_data cdata = {};
> > +	size_t stack_size = con->max_stack * sizeof(*stack_trace);
> > +	struct lock_stat *st = NULL;
> > +
> > +	stacks_fd = bpf_map__fd(skel->maps.owner_stacks);
> > +	stat_fd = bpf_map__fd(skel->maps.owner_stat);
> > +	if (!stacks_fd || !stat_fd)
> > +		goto out_err;
> > +
> > +	stack_trace = zalloc(stack_size);
> > +	if (stack_trace == NULL)
> > +		goto out_err;
> > +
> > +	if (bpf_map_get_next_key(stacks_fd, NULL, stack_trace))
> > +		goto out_err;
> > +
> > +	bpf_map_lookup_elem(stacks_fd, stack_trace, &stack_id);
> > +	ckey.stack_id = stack_id;
> > +	bpf_map_lookup_elem(stat_fd, &ckey, &cdata);
> > +
> > +	st = zalloc(sizeof(struct lock_stat));
> > +	if (!st)
> > +		goto out_err;
> > +
> > +	st->name = strdup(stack_trace[0] ? lock_contention_get_name(con, NULL, stack_trace, 0) :
> > +			  "unknown");
>
> Hi,
>
> I am hitting a compilation issue with this change. Sorry for responding late. I tried with the change from tmp.perf-tools-next and hit the issue below:
>
>
>   CC      util/bpf_lock_contention.o
> util/bpf_lock_contention.c: In function ‘lock_contention_get_name’:
> cc1: error: function may return address of local variable [-Werror=return-local-addr]
> util/bpf_lock_contention.c:470:45: note: declared here
>   470 |                 struct contention_task_data task;
>       |                                             ^~~~
> cc1: all warnings being treated as errors
> make[4]: *** [/root/perf-tools-next/tools/build/Makefile.build:85: util/bpf_lock_contention.o] Error 1
> make[4]: *** Waiting for unfinished jobs....
>   LD      perf-in.o
> make[3]: *** [/root/perf-tools-next/tools/build/Makefile.build:138: util] Error 2
> make[2]: *** [Makefile.perf:822: perf-util-in.o] Error 2
> make[1]: *** [Makefile.perf:321: sub-make] Error 2
> make: *** [Makefile:76: all] Error 2
Thanks for the report. I've noticed that and also found this error:
In file included from util/lock-contention.c:4:0:
util/lock-contention.h:192:19: error: no previous prototype for 'pop_owner_stack_trace' [-Werror=missing-prototypes]
 struct lock_stat *pop_owner_stack_trace(struct lock_contention *con)
                   ^~~~~~~~~~~~~~~~~~~~~
util/lock-contention.h: In function 'pop_owner_stack_trace':
util/lock-contention.h:192:65: error: unused parameter 'con' [-Werror=unused-parameter]
 struct lock_stat *pop_owner_stack_trace(struct lock_contention *con)
                                                                 ^~~
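
Looks like the !HAVE_BPF_SKEL stub also needs to be 'static inline' with the
parameter marked '__maybe_unused', like the other fallbacks in this header.
Something along these lines should silence both errors (untested sketch):

	static inline struct lock_stat *
	pop_owner_stack_trace(struct lock_contention *con __maybe_unused)
	{
		return NULL;
	}
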
Removed this series from tmp.perf-tools-next.
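
For a respin: the -Werror=return-local-addr error seems to be because 'name'
can end up pointing to 'task.comm', which lives in the on-stack
struct contention_task_data, so the function may return the address of a
local. One option would be to copy the comm out before returning - a rough
sketch, assuming name_buf is the static buffer this function already returns
at its end (untested):

	if (!bpf_map_lookup_elem(task_fd, &pid, &task) &&
	    thread__set_comm(t, task.comm, /*timestamp=*/0)) {
		/* don't return a pointer into the on-stack 'task' */
		strlcpy(name_buf, task.comm, sizeof(name_buf));
		name = name_buf;
	}
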
Thanks,
Namhyung
>
>
> Code snippet:
>
> if (con->aggr_mode == LOCK_AGGR_TASK) {
> 	struct contention_task_data task;
> 	int pid = key->pid;
> 	int task_fd = bpf_map__fd(skel->maps.task_data);
>
> 	/* do not update idle comm which contains CPU number */
> 	if (pid) {
> 		struct thread *t = machine__findnew_thread(machine, /*pid=*/-1, pid);
>
> 		if (t == NULL)
> 			return name;
> 		if (!bpf_map_lookup_elem(task_fd, &pid, &task) &&
> 		    thread__set_comm(t, task.comm, /*timestamp=*/0))
> 			name = task.comm;
> 	}
> 	return name;
> }
>
>
> We are calling lock_contention_get_name() with the second argument as NULL.
> Though the error above points to “contention_task_data”, I think the local value is returned through “name”?
>
>
> Thanks
> Athira
>
> > +	if (!st->name)
> > +		goto out_err;
> > +
> > +	st->flags = cdata.flags;
> > +	st->nr_contended = cdata.count;
> > +	st->wait_time_total = cdata.total_time;
> > +	st->wait_time_max = cdata.max_time;
> > +	st->wait_time_min = cdata.min_time;
> > +	st->callstack = stack_trace;
> > +
> > +	if (cdata.count)
> > +		st->avg_wait_time = cdata.total_time / cdata.count;
> > +
> > +	bpf_map_delete_elem(stacks_fd, stack_trace);
> > +	bpf_map_delete_elem(stat_fd, &ckey);
> > +
> > +	return st;
> > +
> > +out_err:
> > +	free(stack_trace);
> > +	free(st);
> > +
> > +	return NULL;
> > +}
> > +
> >  int lock_contention_read(struct lock_contention *con)
> >  {
> >  	int fd, stack, err = 0;
> > diff --git a/tools/perf/util/lock-contention.h b/tools/perf/util/lock-contention.h
> > index a09f7fe877df..97fd33c57f17 100644
> > --- a/tools/perf/util/lock-contention.h
> > +++ b/tools/perf/util/lock-contention.h
> > @@ -168,6 +168,8 @@ int lock_contention_stop(void);
> >  int lock_contention_read(struct lock_contention *con);
> >  int lock_contention_finish(struct lock_contention *con);
> >
> > +struct lock_stat *pop_owner_stack_trace(struct lock_contention *con);
> > +
> >  #else /* !HAVE_BPF_SKEL */
> >
> >  static inline int lock_contention_prepare(struct lock_contention *con __maybe_unused)
> > @@ -187,6 +189,11 @@ static inline int lock_contention_read(struct lock_contention *con __maybe_unuse
> >  	return 0;
> >  }
> >
> > +struct lock_stat *pop_owner_stack_trace(struct lock_contention *con)
> > +{
> > +	return NULL;
> > +}
> > +
> >  #endif /* HAVE_BPF_SKEL */
> >
> >  #endif /* PERF_LOCK_CONTENTION_H */
> > --
> > 2.48.1.658.g4767266eb4-goog
> >
> >
>