Message-Id: <20220823220922.256001-9-irogers@google.com>
Date: Tue, 23 Aug 2022 15:09:12 -0700
From: Ian Rogers <irogers@...gle.com>
To: Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Arnaldo Carvalho de Melo <acme@...nel.org>,
Mark Rutland <mark.rutland@....com>,
Alexander Shishkin <alexander.shishkin@...ux.intel.com>,
Jiri Olsa <jolsa@...nel.org>,
Namhyung Kim <namhyung@...nel.org>,
Thomas Gleixner <tglx@...utronix.de>,
Darren Hart <dvhart@...radead.org>,
Davidlohr Bueso <dave@...olabs.net>,
"André Almeida" <andrealmeid@...lia.com>,
Nathan Chancellor <nathan@...nel.org>,
Nick Desaulniers <ndesaulniers@...gle.com>,
Tom Rix <trix@...hat.com>, Weiguo Li <liwg06@...mail.com>,
Athira Rajeev <atrajeev@...ux.vnet.ibm.com>,
Thomas Richter <tmricht@...ux.ibm.com>,
Ravi Bangoria <ravi.bangoria@....com>,
Dario Petrillo <dario.pk1@...il.com>,
Hewenliang <hewenliang4@...wei.com>,
yaowenbin <yaowenbin1@...wei.com>,
Wenyu Liu <liuwenyu7@...wei.com>,
Song Liu <songliubraving@...com>,
Andrii Nakryiko <andrii@...nel.org>,
Dave Marchevsky <davemarchevsky@...com>,
Leo Yan <leo.yan@...aro.org>,
Kim Phillips <kim.phillips@....com>,
Pavithra Gurushankar <gpavithrasha@...il.com>,
Alexandre Truong <alexandre.truong@....com>,
Quentin Monnet <quentin@...valent.com>,
William Cohen <wcohen@...hat.com>,
Andres Freund <andres@...razel.de>,
Adrian Hunter <adrian.hunter@...el.com>,
"Martin Liška" <mliska@...e.cz>,
Colin Ian King <colin.king@...el.com>,
James Clark <james.clark@....com>,
Fangrui Song <maskray@...gle.com>,
Stephane Eranian <eranian@...gle.com>,
Kajol Jain <kjain@...ux.ibm.com>,
Alexey Bayduraev <alexey.v.bayduraev@...ux.intel.com>,
Riccardo Mancini <rickyman7@...il.com>,
Andi Kleen <ak@...ux.intel.com>,
Masami Hiramatsu <mhiramat@...nel.org>,
Zechuan Chen <chenzechuan1@...wei.com>,
Jason Wang <wangborong@...rlc.com>,
Christophe JAILLET <christophe.jaillet@...adoo.fr>,
Remi Bernon <rbernon@...eweavers.com>,
linux-kernel@...r.kernel.org, linux-perf-users@...r.kernel.org,
bpf@...r.kernel.org, llvm@...ts.linux.dev
Cc: Ian Rogers <irogers@...gle.com>
Subject: [PATCH v2 08/18] perf sched: Update use of pthread mutex

Switch to the mutex wrappers, which provide better error checking. Update
cmd_sched so that the mutexes are always explicitly destroyed: the early
returns become assignments to ret that fall through to a common exit label.
Signed-off-by: Ian Rogers <irogers@...gle.com>
---
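Note: util/mutex.h is added earlier in this series and its contents are not
shown in this patch. As a rough sketch only, a wrapper matching the usage
here (mutex_init() taking a pshared flag, and lock/unlock/destroy aborting
on error) could look like the following; the CHECK_ERR() helper is
hypothetical and the real header may differ:

/* Sketch only: the real wrapper lives in tools/perf/util/mutex.h. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct mutex {
	pthread_mutex_t lock;
};

/* Hypothetical helper: abort loudly if a pthread call fails. */
#define CHECK_ERR(err)							\
	do {								\
		int __e = (err);					\
		if (__e) {						\
			fprintf(stderr, "%s:%d: pthread error %d\n",	\
				__FILE__, __LINE__, __e);		\
			abort();					\
		}							\
	} while (0)

static inline void mutex_init(struct mutex *mtx, bool pshared)
{
	pthread_mutexattr_t attr;

	CHECK_ERR(pthread_mutexattr_init(&attr));
	/*
	 * PTHREAD_MUTEX_ERRORCHECK makes misuse (relocking an owned
	 * mutex, unlocking an unowned one) return an error rather than
	 * deadlocking or silently succeeding.
	 */
	CHECK_ERR(pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
	if (pshared)
		CHECK_ERR(pthread_mutexattr_setpshared(&attr,
						       PTHREAD_PROCESS_SHARED));
	CHECK_ERR(pthread_mutex_init(&mtx->lock, &attr));
	CHECK_ERR(pthread_mutexattr_destroy(&attr));
}

static inline void mutex_lock(struct mutex *mtx)
{
	CHECK_ERR(pthread_mutex_lock(&mtx->lock));
}

static inline void mutex_unlock(struct mutex *mtx)
{
	CHECK_ERR(pthread_mutex_unlock(&mtx->lock));
}

static inline void mutex_destroy(struct mutex *mtx)
{
	CHECK_ERR(pthread_mutex_destroy(&mtx->lock));
}

Because a type attribute like PTHREAD_MUTEX_ERRORCHECK must be set at
runtime, such a mutex cannot be set up with the static
PTHREAD_MUTEX_INITIALIZER, which is why the designated initializers are
dropped below in favour of mutex_init()/mutex_destroy() calls.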
tools/perf/builtin-sched.c | 67 ++++++++++++++++++--------------------
1 file changed, 32 insertions(+), 35 deletions(-)
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 2f6cd1b8b662..0f52f73be896 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -7,6 +7,7 @@
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
+#include "util/mutex.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
@@ -184,8 +185,8 @@ struct perf_sched {
struct task_desc **pid_to_task;
struct task_desc **tasks;
const struct trace_sched_handler *tp_handler;
- pthread_mutex_t start_work_mutex;
- pthread_mutex_t work_done_wait_mutex;
+ struct mutex start_work_mutex;
+ struct mutex work_done_wait_mutex;
int profile_cpu;
/*
* Track the current task - that way we can know whether there's any
@@ -635,10 +636,8 @@ static void *thread_func(void *ctx)
again:
ret = sem_post(&this_task->ready_for_work);
BUG_ON(ret);
- ret = pthread_mutex_lock(&sched->start_work_mutex);
- BUG_ON(ret);
- ret = pthread_mutex_unlock(&sched->start_work_mutex);
- BUG_ON(ret);
+ mutex_lock(&sched->start_work_mutex);
+ mutex_unlock(&sched->start_work_mutex);
cpu_usage_0 = get_cpu_usage_nsec_self(fd);
@@ -652,10 +651,8 @@ static void *thread_func(void *ctx)
ret = sem_post(&this_task->work_done_sem);
BUG_ON(ret);
- ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
- BUG_ON(ret);
- ret = pthread_mutex_unlock(&sched->work_done_wait_mutex);
- BUG_ON(ret);
+ mutex_lock(&sched->work_done_wait_mutex);
+ mutex_unlock(&sched->work_done_wait_mutex);
goto again;
}
@@ -672,10 +669,8 @@ static void create_tasks(struct perf_sched *sched)
err = pthread_attr_setstacksize(&attr,
(size_t) max(16 * 1024, (int)PTHREAD_STACK_MIN));
BUG_ON(err);
- err = pthread_mutex_lock(&sched->start_work_mutex);
- BUG_ON(err);
- err = pthread_mutex_lock(&sched->work_done_wait_mutex);
- BUG_ON(err);
+ mutex_lock(&sched->start_work_mutex);
+ mutex_lock(&sched->work_done_wait_mutex);
for (i = 0; i < sched->nr_tasks; i++) {
struct sched_thread_parms *parms = malloc(sizeof(*parms));
BUG_ON(parms == NULL);
@@ -699,7 +694,7 @@ static void wait_for_tasks(struct perf_sched *sched)
sched->start_time = get_nsecs();
sched->cpu_usage = 0;
- pthread_mutex_unlock(&sched->work_done_wait_mutex);
+ mutex_unlock(&sched->work_done_wait_mutex);
for (i = 0; i < sched->nr_tasks; i++) {
task = sched->tasks[i];
@@ -707,12 +702,11 @@ static void wait_for_tasks(struct perf_sched *sched)
BUG_ON(ret);
sem_init(&task->ready_for_work, 0, 0);
}
- ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
- BUG_ON(ret);
+ mutex_lock(&sched->work_done_wait_mutex);
cpu_usage_0 = get_cpu_usage_nsec_parent();
- pthread_mutex_unlock(&sched->start_work_mutex);
+ mutex_unlock(&sched->start_work_mutex);
for (i = 0; i < sched->nr_tasks; i++) {
task = sched->tasks[i];
@@ -734,8 +728,7 @@ static void wait_for_tasks(struct perf_sched *sched)
sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
sched->parent_cpu_usage)/sched->replay_repeat;
- ret = pthread_mutex_lock(&sched->start_work_mutex);
- BUG_ON(ret);
+ mutex_lock(&sched->start_work_mutex);
for (i = 0; i < sched->nr_tasks; i++) {
task = sched->tasks[i];
@@ -3430,8 +3423,6 @@ int cmd_sched(int argc, const char **argv)
},
.cmp_pid = LIST_HEAD_INIT(sched.cmp_pid),
.sort_list = LIST_HEAD_INIT(sched.sort_list),
- .start_work_mutex = PTHREAD_MUTEX_INITIALIZER,
- .work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
.sort_order = default_sort_order,
.replay_repeat = 10,
.profile_cpu = -1,
@@ -3545,8 +3536,10 @@ int cmd_sched(int argc, const char **argv)
.fork_event = replay_fork_event,
};
unsigned int i;
- int ret;
+ int ret = 0;
+ mutex_init(&sched.start_work_mutex, /*pshared=*/false);
+ mutex_init(&sched.work_done_wait_mutex, /*pshared=*/false);
for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++)
sched.curr_pid[i] = -1;
@@ -3558,11 +3551,10 @@ int cmd_sched(int argc, const char **argv)
/*
* Aliased to 'perf script' for now:
*/
- if (!strcmp(argv[0], "script"))
- return cmd_script(argc, argv);
-
- if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
- return __cmd_record(argc, argv);
+ if (!strcmp(argv[0], "script")) {
+ ret = cmd_script(argc, argv);
+ } else if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
+ ret = __cmd_record(argc, argv);
} else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
sched.tp_handler = &lat_ops;
if (argc > 1) {
@@ -3571,7 +3563,7 @@ int cmd_sched(int argc, const char **argv)
usage_with_options(latency_usage, latency_options);
}
setup_sorting(&sched, latency_options, latency_usage);
- return perf_sched__lat(&sched);
+ ret = perf_sched__lat(&sched);
} else if (!strcmp(argv[0], "map")) {
if (argc) {
argc = parse_options(argc, argv, map_options, map_usage, 0);
@@ -3580,7 +3572,7 @@ int cmd_sched(int argc, const char **argv)
}
sched.tp_handler = &map_ops;
setup_sorting(&sched, latency_options, latency_usage);
- return perf_sched__map(&sched);
+ ret = perf_sched__map(&sched);
} else if (strlen(argv[0]) > 2 && strstarts("replay", argv[0])) {
sched.tp_handler = &replay_ops;
if (argc) {
@@ -3588,7 +3580,7 @@ int cmd_sched(int argc, const char **argv)
if (argc)
usage_with_options(replay_usage, replay_options);
}
- return perf_sched__replay(&sched);
+ ret = perf_sched__replay(&sched);
} else if (!strcmp(argv[0], "timehist")) {
if (argc) {
argc = parse_options(argc, argv, timehist_options,
@@ -3604,16 +3596,21 @@ int cmd_sched(int argc, const char **argv)
parse_options_usage(NULL, timehist_options, "w", true);
if (sched.show_next)
parse_options_usage(NULL, timehist_options, "n", true);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
ret = symbol__validate_sym_arguments();
if (ret)
- return ret;
+ goto out;
- return perf_sched__timehist(&sched);
+ ret = perf_sched__timehist(&sched);
} else {
usage_with_options(sched_usage, sched_options);
}
- return 0;
+out:
+ mutex_destroy(&sched.start_work_mutex);
+ mutex_destroy(&sched.work_done_wait_mutex);
+
+ return ret;
}
--
2.37.2.609.g9ff673ca1a-goog