Message-Id: <34d8ec75acb56fbbbef08fee8be8404015e682cd.1629490974.git.rickyman7@gmail.com>
Date: Sat, 21 Aug 2021 11:19:16 +0200
From: Riccardo Mancini <rickyman7@...il.com>
To: Arnaldo Carvalho de Melo <acme@...nel.org>
Cc: Ian Rogers <irogers@...gle.com>,
Namhyung Kim <namhyung@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Ingo Molnar <mingo@...hat.com>,
Mark Rutland <mark.rutland@....com>,
Jiri Olsa <jolsa@...hat.com>, linux-kernel@...r.kernel.org,
linux-perf-users@...r.kernel.org,
Riccardo Mancini <rickyman7@...il.com>
Subject: [RFC PATCH v1 10/37] perf evlist: add multithreading helper

Add evlist__for_each_evsel_cpu(), which executes the given function on
each evsel, for each cpu.

If perf_singlethreaded is unset, the helper schedules one work item per
cpu on the workqueue, so the callback runs in parallel across cpus.
Otherwise, it falls back to a sequential loop that sets the cpu affinity
before processing each cpu.

This helper will be used in the following patches.
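
For illustration, a minimal usage sketch (not part of this patch; the
callback and wrapper names are invented, and the snippet assumes it is
built inside tools/perf so the usual util headers resolve):

  #include <linux/compiler.h>
  #include "util/debug.h"
  #include "util/evlist.h"
  #include "util/evsel.h"

  /* Illustrative callback: log one line per (evsel, cpu) pair. */
  static int log_evsel_cpu(struct evlist *evlist __maybe_unused,
  			   struct evsel *evsel, int cpu,
  			   void *args __maybe_unused)
  {
  	pr_debug("%s: cpu index %d\n", evsel__name(evsel), cpu);
  	return 0;	/* zero: success, keep iterating */
  }

  static int log_all_evsel_cpus(struct evlist *evlist)
  {
  	/* Runs in parallel via the workqueue unless perf_singlethreaded is set. */
  	return evlist__for_each_evsel_cpu(evlist, log_evsel_cpu, NULL);
  }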
Signed-off-by: Riccardo Mancini <rickyman7@...il.com>
---
tools/perf/util/evlist.c | 117 +++++++++++++++++++++++++++++++++++++++
tools/perf/util/evlist.h | 14 +++++
2 files changed, 131 insertions(+)
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 3d55d9a53b9f4875..f9fdbd85a163ee97 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -27,6 +27,8 @@
#include "util/perf_api_probe.h"
#include "util/evsel_fprintf.h"
#include "util/evlist-hybrid.h"
+#include "util/util.h"
+#include "util/workqueue/workqueue.h"
#include <signal.h>
#include <unistd.h>
#include <sched.h>
@@ -41,6 +43,7 @@
#include <sys/prctl.h>
#include <linux/bitops.h>
+#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>
@@ -369,6 +372,120 @@ static int evlist__is_enabled(struct evlist *evlist)
return false;
}
+struct evlist_work {
+	struct work_struct work;
+	struct evlist *evlist;
+	int cpu;
+	evsel__cpu_func func;
+	void *args;
+	int ret;
+};
+
+static void evlist__for_each_evsel_cpu_thread_func(struct work_struct *_work)
+{
+	struct evlist_work *work = container_of(_work, struct evlist_work, work);
+	int cpu_idx, ret;
+	struct evsel *pos;
+
+	work->ret = 0;
+	evlist__for_each_entry(work->evlist, pos) {
+		cpu_idx = evsel__find_cpu(pos, work->cpu);
+		if (cpu_idx < 0)
+			continue;
+		ret = work->func(work->evlist, pos, cpu_idx, work->args);
+		if (ret) {
+			work->ret = ret;
+			if (ret < 0)	/* error: stop iterating on this cpu */
+				return;
+		}
+	}
+}
+
+static int evlist__for_each_evsel_cpu_multithreaded(struct evlist *evlist,
+						     evsel__cpu_func func, void *args)
+{
+	int i, cpu, ret;
+	struct evlist_work *works;
+	char errbuf[WORKQUEUE_STRERR_BUFSIZE];
+
+	works = calloc(perf_cpu_map__nr(evlist->core.all_cpus), sizeof(*works));
+	perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
+		init_work(&works[i].work);
+		works[i].evlist = evlist;
+		works[i].work.func = evlist__for_each_evsel_cpu_thread_func;
+		works[i].cpu = cpu;
+		works[i].func = func;
+		works[i].args = args;
+		works[i].ret = 0;
+
+		ret = schedule_work_on(cpu, &works[i].work);
+		if (ret) {
+			workqueue_strerror(global_wq, ret, errbuf, sizeof(errbuf));
+			pr_debug("schedule_work: %s\n", errbuf);
+			break;
+		}
+	}
+
+	ret = flush_scheduled_work();
+	if (ret) {
+		workqueue_strerror(global_wq, ret, errbuf, sizeof(errbuf));
+		pr_debug("flush_scheduled_work: %s\n", errbuf);
+		goto out;
+	}
+
+	perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
+		if (works[i].ret) {
+			ret = works[i].ret;
+			if (works[i].ret < 0)	/* error: stop and return it */
+				goto out;
+		}
+	}
+out:
+	free(works);
+
+	return ret;
+}
+
+static int evlist__for_each_evsel_cpu_singlethreaded(struct evlist *evlist,
+						      evsel__cpu_func func, void *args)
+{
+	int ret, err = 0, i, cpu, cpu_idx;
+	struct affinity affinity;
+	struct evsel *pos;
+
+	if (affinity__setup(&affinity) < 0)
+		return -1;
+
+	evlist__for_each_cpu(evlist, i, cpu) {
+		affinity__set(&affinity, cpu);
+
+		evlist__for_each_entry(evlist, pos) {
+			cpu_idx = evsel__find_cpu(pos, cpu);
+			if (cpu_idx < 0)
+				continue;
+			ret = func(evlist, pos, cpu_idx, args);
+			if (ret) {
+				err = ret;
+				if (err < 0)	/* error: stop and return it */
+					goto out;
+			}
+		}
+	}
+
+out:
+	affinity__cleanup(&affinity);
+	return err;
+}
+
+/* Run @func on each evsel, for each cpu; parallel unless perf_singlethreaded is set. */
+int evlist__for_each_evsel_cpu(struct evlist *evlist, evsel__cpu_func func, void *args)
+{
+	if (perf_singlethreaded)
+		return evlist__for_each_evsel_cpu_singlethreaded(evlist, func, args);
+
+	return evlist__for_each_evsel_cpu_multithreaded(evlist, func, args);
+}
+
static void __evlist__disable(struct evlist *evlist, char *evsel_name)
{
struct evsel *pos;
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index fde893170c7ba6d2..5f24a45d4e3cf30a 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -363,4 +363,18 @@ struct evsel *evlist__find_evsel(struct evlist *evlist, int idx);
int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf);
void evlist__check_mem_load_aux(struct evlist *evlist);
+
+/**
+ * evsel__cpu_func - function to run on each evsel, for each cpu
+ * @evlist: the parent evlist
+ * @evsel: the evsel being processed
+ * @cpu: index of the cpu in evsel->core.cpus
+ * @args: additional custom arguments
+ *
+ * Return: zero on success. A negative value is treated as an error, stops the
+ * iteration and is returned to the caller; a positive value is propagated to
+ * the caller of evlist__for_each_evsel_cpu() without stopping the iteration.
+ */
+typedef int (*evsel__cpu_func)(struct evlist *evlist, struct evsel *evsel, int cpu, void *args);
+int evlist__for_each_evsel_cpu(struct evlist *evlist, evsel__cpu_func func, void *args);
#endif /* __PERF_EVLIST_H */
--
2.31.1
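
To make the return convention documented for evsel__cpu_func in evlist.h
concrete, here is a sketch of one possible callback that disables each
event on each cpu. The names are hypothetical and the error handling is
simplified; it assumes the events have already been opened and relies
only on libperf's perf_evsel__disable_cpu(). Note that in the
multithreaded case callbacks for different cpus may run concurrently, so
a callback should not touch shared state without synchronization.

  #include <linux/compiler.h>
  #include <perf/evsel.h>
  #include "util/evlist.h"
  #include "util/evsel.h"

  /*
   * Illustrative callback: disable @evsel on the cpu at index @cpu of
   * evsel->core.cpus. Zero means success, a positive value reports a
   * non-fatal condition to the caller of evlist__for_each_evsel_cpu(),
   * and a negative value is treated as an error.
   */
  static int disable_evsel_cpu(struct evlist *evlist __maybe_unused,
  			       struct evsel *evsel, int cpu,
  			       void *args __maybe_unused)
  {
  	int err;

  	if (evsel->disabled || !evsel->core.fd)
  		return 1;	/* positive: nothing to do, reported but not fatal */

  	err = perf_evsel__disable_cpu(&evsel->core, cpu);
  	if (err < 0)
  		return err;	/* negative: hard error, returned to the caller */

  	return 0;
  }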