Message-ID: <tip-srwunsy7o5wl17vpt4a10oxp@git.kernel.org>
Date:	Tue, 12 Aug 2014 22:14:17 -0700
From:	tip-bot for Jiri Olsa <tipbot@...or.com>
To:	linux-tip-commits@...r.kernel.org
Cc:	acme@...hat.com, linux-kernel@...r.kernel.org, paulus@...ba.org,
	hpa@...or.com, mingo@...nel.org, jolsa@...nel.org,
	a.p.zijlstra@...llo.nl, jean.pihet@...aro.org, namhyung@...nel.org,
	fweisbec@...il.com, dsahern@...il.com, tglx@...utronix.de,
	cjashfor@...ux.vnet.ibm.com
Subject: [tip:perf/core] perf tools: Add ordered_events__(new|delete)
  interface

Commit-ID:  c64c7e1a5addf93b7dec98a27b8c48457506aa06
Gitweb:     http://git.kernel.org/tip/c64c7e1a5addf93b7dec98a27b8c48457506aa06
Author:     Jiri Olsa <jolsa@...nel.org>
AuthorDate: Tue, 10 Jun 2014 21:58:02 +0200
Committer:  Arnaldo Carvalho de Melo <acme@...hat.com>
CommitDate: Tue, 12 Aug 2014 12:02:56 -0300

perf tools: Add ordered_events__(new|delete) interface

Add a new ordered events interface for allocating and releasing event buffers (a toy sketch of the allocate/recycle cycle follows the diffstat below):

  ordered_events__new    - allocate event buffer from the cache
  ordered_events__delete - return event buffer to the cache

Signed-off-by: Jiri Olsa <jolsa@...nel.org>
Acked-by: David Ahern <dsahern@...il.com>
Cc: Corey Ashford <cjashfor@...ux.vnet.ibm.com>
Cc: David Ahern <dsahern@...il.com>
Cc: Frederic Weisbecker <fweisbec@...il.com>
Cc: Ingo Molnar <mingo@...nel.org>
Cc: Jean Pihet <jean.pihet@...aro.org>
Cc: Namhyung Kim <namhyung@...nel.org>
Cc: Paul Mackerras <paulus@...ba.org>
Cc: Peter Zijlstra <a.p.zijlstra@...llo.nl>
Link: http://lkml.kernel.org/n/tip-srwunsy7o5wl17vpt4a10oxp@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@...hat.com>
---
 tools/perf/util/session.c | 169 +++++++++++++++++++++++++++-------------------
 1 file changed, 98 insertions(+), 71 deletions(-)
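
As a quick orientation before the diff, here is a minimal, self-contained toy of the allocate-from-cache / return-to-cache life cycle that ordered_events__new() and ordered_events__delete() implement. The toy_* names, struct layout and chunk size are simplified stand-ins: the real code below links entries with kernel-style list_head nodes, sizes its chunks via MAX_SAMPLE_BUFFER and tracks them on a to_free list, none of which is modelled here.

#include <stdio.h>
#include <stdlib.h>

#define CHUNK_ENTRIES 4			/* stands in for MAX_SAMPLE_BUFFER */

struct toy_event {
	unsigned long long timestamp;
	struct toy_event *next;		/* links entries on the free cache */
};

struct toy_events {
	struct toy_event *cache;	/* free list of recycled entries */
	struct toy_event *chunk;	/* current allocation chunk */
	int chunk_idx;
};

/* Hand out an event slot: prefer the cache, otherwise carve from a chunk. */
static struct toy_event *toy_events__new(struct toy_events *oe,
					 unsigned long long timestamp)
{
	struct toy_event *ev;

	if (oe->cache) {
		ev = oe->cache;
		oe->cache = ev->next;
	} else {
		if (!oe->chunk || oe->chunk_idx == CHUNK_ENTRIES) {
			/* chunks are leaked in this toy; the real code frees
			 * them later via the to_free list */
			oe->chunk = calloc(CHUNK_ENTRIES, sizeof(*ev));
			if (!oe->chunk)
				return NULL;
			oe->chunk_idx = 0;
		}
		ev = &oe->chunk[oe->chunk_idx++];
	}

	ev->timestamp = timestamp;
	return ev;
}

/* Return a processed event to the cache so the next __new() can reuse it. */
static void toy_events__delete(struct toy_events *oe, struct toy_event *ev)
{
	ev->next = oe->cache;
	oe->cache = ev;
}

int main(void)
{
	struct toy_events oe = { 0 };
	struct toy_event *a = toy_events__new(&oe, 100);
	struct toy_event *b = toy_events__new(&oe, 200);

	if (!a || !b)
		return 1;

	toy_events__delete(&oe, a);	/* a's slot goes back to the cache... */
	printf("slot reused: %s\n",
	       toy_events__new(&oe, 300) == a ? "yes" : "no");
					/* ...and is handed out again */
	return 0;
}

The point the toy makes is the one the interface encapsulates: once an event has been flushed, its slot goes back onto the cache, and the next ordered_events__new() call reuses it instead of allocating again.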

diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 619778e..ff0188c 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -464,6 +464,100 @@ static void perf_session_free_sample_buffers(struct perf_session *session)
 	}
 }
 
+/* The queue is ordered by time */
+static void queue_event(struct ordered_events *oe, struct ordered_event *new)
+{
+	struct ordered_event *last = oe->last;
+	u64 timestamp = new->timestamp;
+	struct list_head *p;
+
+	++oe->nr_events;
+	oe->last = new;
+
+	if (!last) {
+		list_add(&new->list, &oe->events);
+		oe->max_timestamp = timestamp;
+		return;
+	}
+
+	/*
+	 * last event might point to some random place in the list as it's
+	 * the last queued event. We expect that the new event is close to
+	 * this.
+	 */
+	if (last->timestamp <= timestamp) {
+		while (last->timestamp <= timestamp) {
+			p = last->list.next;
+			if (p == &oe->events) {
+				list_add_tail(&new->list, &oe->events);
+				oe->max_timestamp = timestamp;
+				return;
+			}
+			last = list_entry(p, struct ordered_event, list);
+		}
+		list_add_tail(&new->list, &last->list);
+	} else {
+		while (last->timestamp > timestamp) {
+			p = last->list.prev;
+			if (p == &oe->events) {
+				list_add(&new->list, &oe->events);
+				return;
+			}
+			last = list_entry(p, struct ordered_event, list);
+		}
+		list_add(&new->list, &last->list);
+	}
+}
+
+#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct ordered_event))
+static struct ordered_event *alloc_event(struct ordered_events *oe)
+{
+	struct list_head *cache = &oe->cache;
+	struct ordered_event *new;
+
+	if (!list_empty(cache)) {
+		new = list_entry(cache->next, struct ordered_event, list);
+		list_del(&new->list);
+	} else if (oe->buffer) {
+		new = oe->buffer + oe->buffer_idx;
+		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
+			oe->buffer = NULL;
+	} else {
+		oe->buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
+		if (!oe->buffer)
+			return NULL;
+		list_add(&oe->buffer->list, &oe->to_free);
+
+		/* First entry is abused to maintain the to_free list. */
+		oe->buffer_idx = 2;
+		new = oe->buffer + 1;
+	}
+
+	return new;
+}
+
+static struct ordered_event *
+ordered_events__new(struct ordered_events *oe, u64 timestamp)
+{
+	struct ordered_event *new;
+
+	new = alloc_event(oe);
+	if (new) {
+		new->timestamp = timestamp;
+		queue_event(oe, new);
+	}
+
+	return new;
+}
+
+static void
+ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
+{
+	list_del(&event->list);
+	list_add(&event->list, &oe->cache);
+	oe->nr_events--;
+}
+
 static int perf_session_deliver_event(struct perf_session *session,
 				      union perf_event *event,
 				      struct perf_sample *sample,
@@ -506,10 +600,8 @@ static int ordered_events__flush(struct perf_session *s,
 				return ret;
 		}
 
+		ordered_events__delete(oe, iter);
 		oe->last_flush = iter->timestamp;
-		list_del(&iter->list);
-		list_add(&iter->list, &oe->cache);
-		oe->nr_events--;
 
 		if (show_progress)
 			ui_progress__update(&prog, 1);
@@ -573,59 +665,10 @@ static int process_finished_round(struct perf_tool *tool,
 	return ret;
 }
 
-/* The queue is ordered by time */
-static void __queue_event(struct ordered_event *new, struct perf_session *s)
-{
-	struct ordered_events *oe = &s->ordered_events;
-	struct ordered_event *last = oe->last;
-	u64 timestamp = new->timestamp;
-	struct list_head *p;
-
-	++oe->nr_events;
-	oe->last = new;
-
-	if (!last) {
-		list_add(&new->list, &oe->events);
-		oe->max_timestamp = timestamp;
-		return;
-	}
-
-	/*
-	 * last event might point to some random place in the list as it's
-	 * the last queued event. We expect that the new event is close to
-	 * this.
-	 */
-	if (last->timestamp <= timestamp) {
-		while (last->timestamp <= timestamp) {
-			p = last->list.next;
-			if (p == &oe->events) {
-				list_add_tail(&new->list, &oe->events);
-				oe->max_timestamp = timestamp;
-				return;
-			}
-			last = list_entry(p, struct ordered_event, list);
-		}
-		list_add_tail(&new->list, &last->list);
-	} else {
-		while (last->timestamp > timestamp) {
-			p = last->list.prev;
-			if (p == &oe->events) {
-				list_add(&new->list, &oe->events);
-				return;
-			}
-			last = list_entry(p, struct ordered_event, list);
-		}
-		list_add(&new->list, &last->list);
-	}
-}
-
-#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct ordered_event))
-
 int perf_session_queue_event(struct perf_session *s, union perf_event *event,
 				    struct perf_sample *sample, u64 file_offset)
 {
 	struct ordered_events *oe = &s->ordered_events;
-	struct list_head *cache = &oe->cache;
 	u64 timestamp = sample->time;
 	struct ordered_event *new;
 
@@ -637,28 +680,12 @@ int perf_session_queue_event(struct perf_session *s, union perf_event *event,
 		return -EINVAL;
 	}
 
-	if (!list_empty(cache)) {
-		new = list_entry(cache->next, struct ordered_event, list);
-		list_del(&new->list);
-	} else if (oe->buffer) {
-		new = oe->buffer + oe->buffer_idx;
-		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
-			oe->buffer = NULL;
-	} else {
-		oe->buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
-		if (!oe->buffer)
-			return -ENOMEM;
-		list_add(&oe->buffer->list, &oe->to_free);
-		oe->buffer_idx = 2;
-		new = oe->buffer + 1;
-	}
+	new = ordered_events__new(oe, timestamp);
+	if (!new)
+		return -ENOMEM;
 
-	new->timestamp = timestamp;
 	new->file_offset = file_offset;
 	new->event = event;
-
-	__queue_event(new, s);
-
 	return 0;
 }
 
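The queue_event() helper added by this patch keeps pending events sorted by timestamp and, per the comment in the code, starts each search from the most recently inserted entry rather than from the head, on the expectation that consecutive events carry nearby timestamps. Below is a rough, self-contained illustration of that search strategy only; it uses a hand-rolled doubly linked list with a sentinel head and invented names instead of the kernel list_head helpers, so it is a sketch of the idea, not the perf implementation.

#include <stdio.h>
#include <stdlib.h>

struct node {
	unsigned long long timestamp;
	struct node *prev, *next;
};

struct queue {
	struct node head;	/* sentinel: head.next is the oldest event */
	struct node *last;	/* most recently inserted node */
};

static void queue_init(struct queue *q)
{
	q->head.prev = q->head.next = &q->head;
	q->last = NULL;
}

/* Link n immediately after pos. */
static void insert_after(struct node *pos, struct node *n)
{
	n->prev = pos;
	n->next = pos->next;
	pos->next->prev = n;
	pos->next = n;
}

/* Keep the list sorted by timestamp, searching from the last insertion point. */
static void queue_event(struct queue *q, struct node *new)
{
	struct node *last = q->last;

	q->last = new;

	if (!last) {		/* empty queue */
		insert_after(&q->head, new);
		return;
	}

	if (last->timestamp <= new->timestamp) {
		/* walk forward past entries that are still <= the new one */
		while (last->next != &q->head &&
		       last->next->timestamp <= new->timestamp)
			last = last->next;
		insert_after(last, new);
	} else {
		/* walk backward until the previous entry is <= the new one */
		while (last->prev != &q->head &&
		       last->prev->timestamp > new->timestamp)
			last = last->prev;
		insert_after(last->prev, new);
	}
}

int main(void)
{
	unsigned long long ts[] = { 50, 30, 40, 60, 10 };
	struct queue q;
	struct node *n;
	size_t i;

	queue_init(&q);
	for (i = 0; i < sizeof(ts) / sizeof(ts[0]); i++) {
		n = calloc(1, sizeof(*n));
		if (!n)
			return 1;
		n->timestamp = ts[i];
		queue_event(&q, n);
	}

	for (n = q.head.next; n != &q.head; n = n->next)
		printf("%llu ", n->timestamp);	/* prints: 10 30 40 50 60 */
	printf("\n");
	return 0;
}

With nearly sorted input, each insertion touches only a few neighbouring nodes instead of rescanning the whole queue; the out-of-order 10 at the end is the worst case in this example and still lands correctly at the front.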