Message-ID: <tip-3rdeg23rv3brvy1pwt3igvyw@git.kernel.org>
Date: Tue, 18 Dec 2018 06:15:17 -0800
From: tip-bot for Jiri Olsa <tipbot@...or.com>
To: linux-tip-commits@...r.kernel.org
Cc: tglx@...utronix.de, hpa@...or.com, davem@...emloft.net,
mingo@...nel.org, acme@...hat.com,
alexander.shishkin@...ux.intel.com, namhyung@...nel.org,
peterz@...radead.org, jolsa@...nel.org,
linux-kernel@...r.kernel.org
Subject: [tip:perf/core] perf top: Use cond variable instead of a lock
Commit-ID: 94ad6e7e3606454498aeac1fdd1b9de5c1e6735a
Gitweb: https://git.kernel.org/tip/94ad6e7e3606454498aeac1fdd1b9de5c1e6735a
Author: Jiri Olsa <jolsa@...nel.org>
AuthorDate: Mon, 5 Nov 2018 21:23:40 +0100
Committer: Arnaldo Carvalho de Melo <acme@...hat.com>
CommitDate: Mon, 17 Dec 2018 14:58:03 -0300
perf top: Use cond variable instead of a lock
Use condition variable logic to synchronize the reading and processing
threads. Currently this is done by holding a mutex around the rotation
code.
Use a POSIX condition variable to sync both threads after queue rotation:
Process thread:
- Detects data
- Switches queues
- Sets rotate variable
- Waits in pthread_cond_wait()
Read thread:
- Detects rotate is set
- Kicks the process thread with a pthread_cond_signal()
After this, the rotation is safely completed and both threads can
continue with the new queue.
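For illustration, a minimal standalone sketch of this handshake outside
the perf code base (the names queue_sync/qs, process_side() and
read_side() are made up for the example; the real state lives in
top->qe). Unlike the patch, the sketch wraps pthread_cond_wait() in a
while loop, the standard guard against spurious wakeups:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct queue_sync {
	bool		rotate;
	pthread_mutex_t	mutex;
	pthread_cond_t	cond;
};

static struct queue_sync qs = {
	.rotate	= false,
	.mutex	= PTHREAD_MUTEX_INITIALIZER,
	.cond	= PTHREAD_COND_INITIALIZER,
};

/* Process side: switch queues, then wait until the reader acks. */
static void *process_side(void *arg)
{
	(void)arg;

	/* rotate_queues() would swap the in/out queue pointers here */

	pthread_mutex_lock(&qs.mutex);
	qs.rotate = true;
	/*
	 * pthread_cond_wait() drops the mutex while sleeping and
	 * re-acquires it before returning; looping on the predicate
	 * guards against spurious wakeups.
	 */
	while (qs.rotate)
		pthread_cond_wait(&qs.cond, &qs.mutex);
	pthread_mutex_unlock(&qs.mutex);

	printf("process side: rotation acked, safe to flush old queue\n");
	return NULL;
}

/* Read side: after queueing each event, ack a pending rotation. */
static void *read_side(void *arg)
{
	bool acked = false;

	(void)arg;
	while (!acked) {
		/* per-event work (queueing, consuming the mmap) goes here */
		pthread_mutex_lock(&qs.mutex);
		if (qs.rotate) {
			qs.rotate = false;
			pthread_cond_signal(&qs.cond);
			acked = true;
		}
		pthread_mutex_unlock(&qs.mutex);
	}
	return NULL;
}

int main(void)
{
	pthread_t proc, rd;

	pthread_create(&proc, NULL, process_side, NULL);
	pthread_create(&rd, NULL, read_side, NULL);

	pthread_join(rd, NULL);
	pthread_join(proc, NULL);
	return 0;
}

Because the reader only signals after observing rotate == true under the
mutex, and the processing side sets the flag and sleeps while holding
that same mutex, the wakeup cannot be lost.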
Signed-off-by: Jiri Olsa <jolsa@...nel.org>
Acked-by: David S. Miller <davem@...emloft.net>
Acked-by: Namhyung Kim <namhyung@...nel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@...hat.com>
Cc: Alexander Shishkin <alexander.shishkin@...ux.intel.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Link: http://lkml.kernel.org/n/tip-3rdeg23rv3brvy1pwt3igvyw@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@...hat.com>
---
tools/perf/builtin-top.c | 24 +++++++++++++++++-------
tools/perf/util/top.h | 4 +++-
2 files changed, 20 insertions(+), 8 deletions(-)
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 75afeae7f04d..aad58643102e 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -846,13 +846,18 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
if (ret && ret != -1)
break;
- pthread_mutex_lock(&top->qe.lock);
ret = ordered_events__queue(top->qe.in, event, timestamp, 0);
- pthread_mutex_unlock(&top->qe.lock);
-
- perf_mmap__consume(md);
if (ret)
break;
+
+ perf_mmap__consume(md);
+
+ if (top->qe.rotate) {
+ pthread_mutex_lock(&top->qe.mutex);
+ top->qe.rotate = false;
+ pthread_cond_signal(&top->qe.cond);
+ pthread_mutex_unlock(&top->qe.mutex);
+ }
}
perf_mmap__read_done(md);
@@ -1059,9 +1064,12 @@ static void *process_thread(void *arg)
continue;
}
- pthread_mutex_lock(&top->qe.lock);
out = rotate_queues(top);
- pthread_mutex_unlock(&top->qe.lock);
+
+ pthread_mutex_lock(&top->qe.mutex);
+ top->qe.rotate = true;
+ pthread_cond_wait(&top->qe.cond, &top->qe.mutex);
+ pthread_mutex_unlock(&top->qe.mutex);
if (ordered_events__flush(out, OE_FLUSH__TOP))
pr_err("failed to process events\n");
@@ -1151,7 +1159,8 @@ static void init_process_thread(struct perf_top *top)
ordered_events__set_copy_on_queue(&top->qe.data[0], true);
ordered_events__set_copy_on_queue(&top->qe.data[1], true);
top->qe.in = &top->qe.data[0];
- pthread_mutex_init(&top->qe.lock, NULL);
+ pthread_mutex_init(&top->qe.mutex, NULL);
+ pthread_cond_init(&top->qe.cond, NULL);
}
static int __cmd_top(struct perf_top *top)
@@ -1271,6 +1280,7 @@ static int __cmd_top(struct perf_top *top)
out_join:
pthread_join(thread, NULL);
out_join_thread:
+ pthread_cond_signal(&top->qe.cond);
pthread_join(thread_process, NULL);
out_delete:
perf_session__delete(top->session);
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index 5f503293cfd8..5bce62ebcf14 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -44,7 +44,9 @@ struct perf_top {
struct {
struct ordered_events *in;
struct ordered_events data[2];
- pthread_mutex_t lock;
+ bool rotate;
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
} qe;
};