Message-Id: <20230406210611.1622492-5-namhyung@kernel.org>
Date: Thu, 6 Apr 2023 14:06:08 -0700
From: Namhyung Kim <namhyung@...nel.org>
To: Arnaldo Carvalho de Melo <acme@...nel.org>,
Jiri Olsa <jolsa@...nel.org>
Cc: Ian Rogers <irogers@...gle.com>,
Adrian Hunter <adrian.hunter@...el.com>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...nel.org>,
LKML <linux-kernel@...r.kernel.org>,
linux-perf-users@...r.kernel.org, Song Liu <song@...nel.org>,
Hao Luo <haoluo@...gle.com>, bpf@...r.kernel.org,
Juri Lelli <juri.lelli@...hat.com>
Subject: [PATCH 4/7] perf lock contention: Add data failure stat

Updating the data can fail when the lock_stat map is full.  We should check
for that case and show the count at the end.

  $ sudo ./perf lock con -ablv -E3 -- ./perf bench sched messaging
  ...
   contended   total wait     max wait     avg wait          address   symbol

        6157    208.48 ms     69.29 us     33.86 us   ffff934c001c1f00   (spinlock)
        4030     72.04 ms     61.84 us     17.88 us   ffff934c000415c0   (spinlock)
        3201     50.30 ms     47.73 us     15.71 us   ffff934c2eead850   (spinlock)

  === output for debug ===

  bad: 0, total: 13388
  bad rate: 0.00 %
  histogram of failure reasons
         task: 0
        stack: 0
         time: 0
         data: 0    <----- added
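
For context: with BPF_NOEXIST, bpf_map_update_elem() returns a negative error
once the lock_stat map has no free slot left, so contention_end() only bumps a
counter and the tool later folds it into the "bad" total shown above.  Below
is a minimal, hypothetical user-space sketch of that bookkeeping; MAP_CAP,
try_insert() and the counters are made up for illustration and are not perf or
BPF code.

/*
 * Hypothetical sketch of the "data" failure accounting.  MAP_CAP,
 * try_insert() and the counters below stand in for the real map update;
 * this is illustration only, not perf code.
 */
#include <stdio.h>

#define MAP_CAP 4		/* pretend capacity of the lock_stat map */

static int map_used;		/* entries currently stored */
static int data_fail;		/* counterpart of data_fail in the BPF program */

/* Fails the way bpf_map_update_elem(..., BPF_NOEXIST) does on a full map. */
static int try_insert(void)
{
	if (map_used >= MAP_CAP)
		return -1;
	map_used++;
	return 0;
}

int main(void)
{
	int total = 0;

	for (int i = 0; i < 10; i++) {
		total++;
		if (try_insert() < 0)
			__sync_fetch_and_add(&data_fail, 1);
	}

	printf("bad: %d, total: %d\n", data_fail, total);
	printf("bad rate: %.2f %%\n", 100.0 * data_fail / total);
	printf(" %10s: %d\n", "data", data_fail);
	return 0;
}

Run with a small MAP_CAP like this, it reports a non-zero "data" count and a
non-zero bad rate, mirroring the debug output above.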

Signed-off-by: Namhyung Kim <namhyung@...nel.org>
---
 tools/perf/builtin-lock.c                      | 4 +++-
 tools/perf/util/bpf_lock_contention.c          | 1 +
 tools/perf/util/bpf_skel/lock_contention.bpf.c | 4 +++-
 tools/perf/util/lock-contention.h              | 1 +
 4 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 9b92c7a5aefb..01b318d6c80a 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -1626,7 +1626,7 @@ static void sort_contention_result(void)
 static void print_bpf_events(int total, struct lock_contention_fails *fails)
 {
 	/* Output for debug, this have to be removed */
-	int broken = fails->task + fails->stack + fails->time;
+	int broken = fails->task + fails->stack + fails->time + fails->data;
 
 	if (quiet || total == 0 || (broken == 0 && verbose <= 0))
 		return;
@@ -1640,7 +1640,9 @@ static void print_bpf_events(int total, struct lock_contention_fails *fails)
 	pr_info(" %10s: %d\n", "task", fails->task);
 	pr_info(" %10s: %d\n", "stack", fails->stack);
 	pr_info(" %10s: %d\n", "time", fails->time);
+	pr_info(" %10s: %d\n", "data", fails->data);
 }
+
 static void print_contention_result(struct lock_contention *con)
 {
 	struct lock_stat *st;
diff --git a/tools/perf/util/bpf_lock_contention.c b/tools/perf/util/bpf_lock_contention.c
index 8a5d0eb44189..0071058ac3d2 100644
--- a/tools/perf/util/bpf_lock_contention.c
+++ b/tools/perf/util/bpf_lock_contention.c
@@ -262,6 +262,7 @@ int lock_contention_read(struct lock_contention *con)
 	con->fails.task = skel->bss->task_fail;
 	con->fails.stack = skel->bss->stack_fail;
 	con->fails.time = skel->bss->time_fail;
+	con->fails.data = skel->bss->data_fail;
 
 	stack_trace = zalloc(stack_size);
 	if (stack_trace == NULL)
diff --git a/tools/perf/util/bpf_skel/lock_contention.bpf.c b/tools/perf/util/bpf_skel/lock_contention.bpf.c
index f9d2d792ccc8..cb87c98e5340 100644
--- a/tools/perf/util/bpf_skel/lock_contention.bpf.c
+++ b/tools/perf/util/bpf_skel/lock_contention.bpf.c
@@ -124,6 +124,7 @@ int aggr_mode;
 int task_fail;
 int stack_fail;
 int time_fail;
+int data_fail;
 
 static inline int can_record(u64 *ctx)
 {
@@ -380,7 +381,8 @@ int contention_end(u64 *ctx)
 		if (aggr_mode == LOCK_AGGR_ADDR)
 			first.flags |= check_lock_type(pelem->lock, pelem->flags);
 
-		bpf_map_update_elem(&lock_stat, &key, &first, BPF_NOEXIST);
+		if (bpf_map_update_elem(&lock_stat, &key, &first, BPF_NOEXIST) < 0)
+			__sync_fetch_and_add(&data_fail, 1);
 		bpf_map_delete_elem(&tstamp, &pid);
 		return 0;
 	}
diff --git a/tools/perf/util/lock-contention.h b/tools/perf/util/lock-contention.h
index 10c28302420c..3ed1cad370fc 100644
--- a/tools/perf/util/lock-contention.h
+++ b/tools/perf/util/lock-contention.h
@@ -126,6 +126,7 @@ struct lock_contention_fails {
 	int task;
 	int stack;
 	int time;
+	int data;
 };
 
 struct lock_contention {

--
2.40.0.577.gac1e443424-goog