[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CAP-5=fWDN_DjaVhJWDCiHvnKJnbD4xj8_VMtJz+rQyZKvjvdUw@mail.gmail.com>
Date: Wed, 1 Jun 2022 23:23:18 -0700
From: Ian Rogers <irogers@...gle.com>
To: Namhyung Kim <namhyung@...nel.org>
Cc: Arnaldo Carvalho de Melo <acme@...nel.org>,
Jiri Olsa <jolsa@...nel.org>, Ingo Molnar <mingo@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
LKML <linux-kernel@...r.kernel.org>,
linux-perf-users@...r.kernel.org, Will Deacon <will@...nel.org>,
Waiman Long <longman@...hat.com>,
Boqun Feng <boqun.feng@...il.com>,
Davidlohr Bueso <dave@...olabs.net>
Subject: Re: [PATCH 3/5] perf lock: Handle lock contention tracepoints
On Tue, May 31, 2022 at 11:58 PM Namhyung Kim <namhyung@...nel.org> wrote:
>
> When the lock contention events are used, there's no tracking of
> acquire and release. So the state machine is simplified to use
> UNINITIALIZED -> CONTENDED -> ACQUIRED only.
>
> Note that the CONTENDED state is re-entrant since mutex locks can hit
> two or more consecutive contention_begin events due to optimistic
> spinning followed by sleeping.
>
> Signed-off-by: Namhyung Kim <namhyung@...nel.org>
Acked-by: Ian Rogers <irogers@...gle.com>
Thanks,
Ian
> ---
> tools/perf/builtin-lock.c | 125 ++++++++++++++++++++++++++++++++++++++
> 1 file changed, 125 insertions(+)
>
> diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
> index 3e3320b8cede..82cbf0f2e4cd 100644
> --- a/tools/perf/builtin-lock.c
> +++ b/tools/perf/builtin-lock.c
> @@ -789,6 +789,124 @@ static int report_lock_release_event(struct evsel *evsel,
> return 0;
> }
>
> +static int report_lock_contention_begin_event(struct evsel *evsel,
> + struct perf_sample *sample)
> +{
> + struct lock_stat *ls;
> + struct thread_stat *ts;
> + struct lock_seq_stat *seq;
> + u64 addr = evsel__intval(evsel, sample, "lock_addr");
> +
> + if (show_thread_stats)
> + addr = sample->tid;
> +
> + ls = lock_stat_findnew(addr, "No name");
> + if (!ls)
> + return -ENOMEM;
> +
> + ts = thread_stat_findnew(sample->tid);
> + if (!ts)
> + return -ENOMEM;
> +
> + seq = get_seq(ts, addr);
> + if (!seq)
> + return -ENOMEM;
> +
> + switch (seq->state) {
> + case SEQ_STATE_UNINITIALIZED:
> + case SEQ_STATE_ACQUIRED:
> + break;
> + case SEQ_STATE_CONTENDED:
> + /*
> + * It can have nested contention begin with mutex spinning,
> + * then we would use the original contention begin event and
> + * ignore the second one.
> + */
> + goto end;
> + case SEQ_STATE_ACQUIRING:
> + case SEQ_STATE_READ_ACQUIRED:
> + case SEQ_STATE_RELEASED:
> + /* broken lock sequence */
> + if (!ls->broken) {
> + ls->broken = 1;
> + bad_hist[BROKEN_CONTENDED]++;
> + }
> + list_del_init(&seq->list);
> + free(seq);
> + goto end;
> + default:
> + BUG_ON("Unknown state of lock sequence found!\n");
> + break;
> + }
> +
> + if (seq->state != SEQ_STATE_CONTENDED) {
> + seq->state = SEQ_STATE_CONTENDED;
> + seq->prev_event_time = sample->time;
> + ls->nr_contended++;
> + }
> +end:
> + return 0;
> +}
> +
> +static int report_lock_contention_end_event(struct evsel *evsel,
> + struct perf_sample *sample)
> +{
> + struct lock_stat *ls;
> + struct thread_stat *ts;
> + struct lock_seq_stat *seq;
> + u64 contended_term;
> + u64 addr = evsel__intval(evsel, sample, "lock_addr");
> +
> + if (show_thread_stats)
> + addr = sample->tid;
> +
> + ls = lock_stat_findnew(addr, "No name");
> + if (!ls)
> + return -ENOMEM;
> +
> + ts = thread_stat_findnew(sample->tid);
> + if (!ts)
> + return -ENOMEM;
> +
> + seq = get_seq(ts, addr);
> + if (!seq)
> + return -ENOMEM;
> +
> + switch (seq->state) {
> + case SEQ_STATE_UNINITIALIZED:
> + goto end;
> + case SEQ_STATE_CONTENDED:
> + contended_term = sample->time - seq->prev_event_time;
> + ls->wait_time_total += contended_term;
> + if (contended_term < ls->wait_time_min)
> + ls->wait_time_min = contended_term;
> + if (ls->wait_time_max < contended_term)
> + ls->wait_time_max = contended_term;
> + break;
> + case SEQ_STATE_ACQUIRING:
> + case SEQ_STATE_ACQUIRED:
> + case SEQ_STATE_READ_ACQUIRED:
> + case SEQ_STATE_RELEASED:
> + /* broken lock sequence */
> + if (!ls->broken) {
> + ls->broken = 1;
> + bad_hist[BROKEN_CONTENDED]++;
> + }
> + list_del_init(&seq->list);
> + free(seq);
> + goto end;
> + default:
> + BUG_ON("Unknown state of lock sequence found!\n");
> + break;
> + }
> +
> + seq->state = SEQ_STATE_ACQUIRED;
> + ls->nr_acquired++;
> + ls->avg_wait_time = ls->wait_time_total/ls->nr_acquired;
> +end:
> + return 0;
> +}
> +
> /* lock oriented handlers */
> /* TODO: handlers for CPU oriented, thread oriented */
> static struct trace_lock_handler report_lock_ops = {
> @@ -796,6 +914,8 @@ static struct trace_lock_handler report_lock_ops = {
> .acquired_event = report_lock_acquired_event,
> .contended_event = report_lock_contended_event,
> .release_event = report_lock_release_event,
> + .contention_begin_event = report_lock_contention_begin_event,
> + .contention_end_event = report_lock_contention_end_event,
> };
>
> static struct trace_lock_handler *trace_handler;
> @@ -1081,6 +1201,11 @@ static int __cmd_report(bool display_info)
> goto out_delete;
> }
>
> + if (perf_session__set_tracepoints_handlers(session, contention_tracepoints)) {
> + pr_err("Initializing perf session tracepoint handlers failed\n");
> + goto out_delete;
> + }
> +
> if (setup_output_field(output_fields))
> goto out_delete;
>
> --
> 2.36.1.255.ge46751e96f-goog
>
Powered by blists - more mailing lists