Message-Id: <20181104.165039.878682622135788379.davem@davemloft.net>
Date: Sun, 04 Nov 2018 16:50:39 -0800 (PST)
From: David Miller <davem@...emloft.net>
To: jolsa@...hat.com
Cc: acme@...nel.org, linux-kernel@...r.kernel.org, namhyung@...nel.org,
jolsa@...nel.org
Subject: Re: [PATCH RFC] hist lookups

From: Jiri Olsa <jolsa@...hat.com>
Date: Sun, 4 Nov 2018 21:18:21 +0100

> do you have some code I could check on?

All I have is this patch, which parallelizes the mmap readers in perf
top.

It's not complete and you need to add proper locking, particularly
around the machine__resolve() call.
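
Roughly, the locking I have in mind would look something like the
sketch below. machine__resolve_locked() and resolve_lock are
hypothetical names, not part of the patch, and a single global mutex
is only the crudest possible starting point:

static pthread_mutex_t resolve_lock = PTHREAD_MUTEX_INITIALIZER;

/* Serialize machine__resolve() so that concurrent mmap reader
 * threads don't race on the shared machine/thread/map state. */
static int machine__resolve_locked(struct machine *machine,
				   struct addr_location *al,
				   struct perf_sample *sample)
{
	int err;

	pthread_mutex_lock(&resolve_lock);
	err = machine__resolve(machine, al, sample);
	pthread_mutex_unlock(&resolve_lock);

	return err;
}
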
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index d21d875..e214225 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -725,7 +725,9 @@ static void perf_event__process_sample(struct perf_tool *tool,
 	if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
 		top->exact_samples++;
 
-	if (machine__resolve(machine, &al, sample) < 0)
+	err = machine__resolve(machine, &al, sample);
+
+	if (err < 0)
 		return;
 
 	if (!machine->kptr_restrict_warned &&
@@ -877,6 +879,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
 	perf_mmap__read_done(md);
 }
 
+#if 0
 static void perf_top__mmap_read(struct perf_top *top)
 {
 	bool overwrite = top->record_opts.overwrite;
@@ -903,6 +906,7 @@ static void perf_top__mmap_read(struct perf_top *top)
 			    "decreasing the freq (-F) or\n"
 			    "limiting the number of CPUs (-C)\n");
 }
+#endif
 
 /*
  * Check per-event overwrite term.
@@ -1063,6 +1067,59 @@ static int callchain_param__setup_sample_type(struct callchain_param *callchain)
 	return 0;
 }
 
+struct mmap_thread_arg {
+	struct perf_top *top;
+	int index;
+};
+
+static void *mmap_thread_worker(void *arg)
+{
+	struct mmap_thread_arg *ap = arg;
+
+	while (!done)
+		perf_top__mmap_read_idx(ap->top, ap->index);
+
+	return NULL;
+}
+
+static pthread_t *mmap_threads;
+
+static int blitzkreig_bop(struct perf_top *top)
+{
+	struct perf_evlist *evlist = top->evlist;
+	int i, nr_threads = evlist->nr_mmaps;
+	struct mmap_thread_arg *ap;
+
+	fprintf(stderr, "Creating %d mmap threads\n", nr_threads);
+	fflush(stderr);
+
+	ap = calloc(nr_threads, sizeof(*ap));
+	if (!ap)
+		return -ENOMEM;
+
+	mmap_threads = calloc(nr_threads, sizeof(pthread_t));
+	if (!mmap_threads) {
+		free(ap);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < nr_threads; i++) {
+		ap[i].top = top;
+		ap[i].index = i;
+	}
+
+	/* Current thread will handle index zero. */
+	for (i = 1; i < nr_threads; i++) {
+		int err = pthread_create(&mmap_threads[i], NULL,
+					 mmap_thread_worker, &ap[i]);
+
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static int __cmd_top(struct perf_top *top)
 {
 	char msg[512];
@@ -1134,11 +1191,6 @@ static int __cmd_top(struct perf_top *top)
 	if (!target__none(&opts->target))
 		perf_evlist__enable(top->evlist);
 
-	/* Wait for a minimal set of events before starting the snapshot */
-	perf_evlist__poll(top->evlist, 100);
-
-	perf_top__mmap_read(top);
-
 	ret = -1;
 	if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
 						    display_thread), top)) {
@@ -1156,13 +1208,30 @@ static int __cmd_top(struct perf_top *top)
 		}
 	}
 
+	ret = blitzkreig_bop(top);
+	if (ret)
+		goto out_join;
+
+#if 1
+	perf_top__mmap_read_idx(top, 0);
+#else
+	/* Wait for a minimal set of events before starting the snapshot */
+	perf_evlist__poll(top->evlist, 100);
+	perf_top__mmap_read(top);
+#endif
+
 	while (!done) {
+#if 0
 		u64 hits = top->samples;
+#endif
 
+#if 1
+		perf_top__mmap_read_idx(top, 0);
+#else
 		perf_top__mmap_read(top);
-
 		if (opts->overwrite || (hits == top->samples))
 			ret = perf_evlist__poll(top->evlist, 100);
+#endif
 
 		if (resize) {
 			perf_top__resize(top);
@@ -1257,7 +1326,7 @@ int cmd_top(int argc, const char **argv)
 				.uses_mmap = true,
 			},
 			.proc_map_timeout    = 500,
-			.overwrite	= 1,
+			.overwrite	= 0,
 		},
 		.max_stack	     = sysctl__max_stack(),
 		.annotation_opts = annotation__default_options,
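
The patch also never reaps the worker threads on exit; something along
the lines of the sketch below would be needed as well
(stop_mmap_threads() is a hypothetical helper, not part of the patch):

static void stop_mmap_threads(struct perf_top *top)
{
	int i, nr_threads = top->evlist->nr_mmaps;

	/* The workers poll the global 'done' flag; once it is set,
	 * each one falls out of its read loop.  Index zero is read
	 * by the main thread itself, so skip it here. */
	for (i = 1; i < nr_threads; i++)
		pthread_join(mmap_threads[i], NULL);

	free(mmap_threads);
}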