Message-ID: <tip-b5fae128e41021889777f8ead810cbd2a8b249fc@git.kernel.org>
Date:	Tue, 15 Sep 2009 09:34:47 GMT
From:	tip-bot for Ingo Molnar <mingo@...e.hu>
To:	linux-tip-commits@...r.kernel.org
Cc:	linux-kernel@...r.kernel.org, acme@...hat.com, paulus@...ba.org,
	hpa@...or.com, mingo@...hat.com, a.p.zijlstra@...llo.nl,
	efault@....de, fweisbec@...il.com, tglx@...utronix.de,
	mingo@...e.hu
Subject: [tip:perfcounters/core] perf sched: Clean up PID sorting logic

Commit-ID:  b5fae128e41021889777f8ead810cbd2a8b249fc
Gitweb:     http://git.kernel.org/tip/b5fae128e41021889777f8ead810cbd2a8b249fc
Author:     Ingo Molnar <mingo@...e.hu>
AuthorDate: Fri, 11 Sep 2009 12:12:54 +0200
Committer:  Ingo Molnar <mingo@...e.hu>
CommitDate: Sun, 13 Sep 2009 10:22:50 +0200

perf sched: Clean up PID sorting logic

Use a sort list for thread atoms insertion as well, instead of
hard-coding the comparison to the PID. (A standalone sketch of the
resulting comparator pattern follows the sign-off block.)

Cc: Peter Zijlstra <a.p.zijlstra@...llo.nl>
Cc: Mike Galbraith <efault@....de>
Cc: Paul Mackerras <paulus@...ba.org>
Cc: Arnaldo Carvalho de Melo <acme@...hat.com>
Cc: Frederic Weisbecker <fweisbec@...il.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@...e.hu>
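
For illustration only, here is a minimal standalone sketch (not part of
the patch) of the comparator pattern this change moves to: the PID
comparison is no longer hard-coded in the tree search; instead a list of
sort dimensions is walked, and the first dimension that tells two entries
apart decides the ordering. The sketch drops the kernel rbtree and
list_head plumbing in favour of a plain array, and the types and field
names are simplified stand-ins for the real ones in builtin-sched.c.

/*
 * Standalone sketch: pluggable sort dimensions, first non-zero
 * comparison wins. Simplified stand-in types, not the kernel code.
 */
#include <stdio.h>

struct task_atoms {
	int pid;                   /* stand-in for atoms->thread->pid */
	unsigned long long max_lat;
};

typedef int (*sort_fn_t)(struct task_atoms *, struct task_atoms *);

struct sort_dimension {
	const char *name;
	sort_fn_t   cmp;
};

static int pid_cmp(struct task_atoms *l, struct task_atoms *r)
{
	if (l->pid < r->pid)
		return -1;
	if (l->pid > r->pid)
		return 1;
	return 0;
}

static int max_cmp(struct task_atoms *l, struct task_atoms *r)
{
	if (l->max_lat < r->max_lat)
		return -1;
	if (l->max_lat > r->max_lat)
		return 1;
	return 0;
}

/* Like thread_lat_cmp(): walk the dimensions, first non-zero result wins. */
static int atoms_cmp(struct sort_dimension **dims, int ndims,
		     struct task_atoms *l, struct task_atoms *r)
{
	int i, ret = 0;

	for (i = 0; i < ndims; i++) {
		ret = dims[i]->cmp(l, r);
		if (ret)
			break;
	}
	return ret;
}

int main(void)
{
	struct sort_dimension pid_dim = { "pid", pid_cmp };
	struct sort_dimension max_dim = { "max", max_cmp };
	struct sort_dimension *by_pid[]          = { &pid_dim };
	struct sort_dimension *by_max_then_pid[] = { &max_dim, &pid_dim };

	struct task_atoms a = { .pid = 100, .max_lat = 5 };
	struct task_atoms b = { .pid = 200, .max_lat = 5 };

	/* The same comparator machinery serves both insertion and lookup. */
	printf("by pid:           %d\n", atoms_cmp(by_pid, 1, &a, &b));           /* -1 */
	printf("by max, then pid: %d\n", atoms_cmp(by_max_then_pid, 2, &a, &b));  /* -1: max ties, pid decides */
	return 0;
}

In the patch itself, the same thread_lat_cmp() comparator now backs both
__thread_latency_insert() and thread_atoms_search(), so insertion and
lookup always agree on the key.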


---
 tools/perf/builtin-sched.c |   88 +++++++++++++++++++++++--------------------
 tools/perf/util/thread.h   |    8 ++--
 2 files changed, 51 insertions(+), 45 deletions(-)

diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index cc2dbd5..b72544f 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -144,7 +144,7 @@ struct task_atoms {
 	u64			total_runtime;
 };
 
-typedef int (*sort_thread_lat)(struct task_atoms *, struct task_atoms *);
+typedef int (*sort_fn_t)(struct task_atoms *, struct task_atoms *);
 
 static struct rb_root		atom_root, sorted_atom_root;
 
@@ -869,41 +869,22 @@ static struct trace_sched_handler replay_ops  = {
 	.fork_event		= replay_fork_event,
 };
 
-static struct task_atoms *
-thread_atoms_search(struct rb_root *root, struct thread *thread)
-{
-	struct rb_node *node = root->rb_node;
-
-	while (node) {
-		struct task_atoms *atoms;
-
-		atoms = container_of(node, struct task_atoms, node);
-		if (thread->pid > atoms->thread->pid)
-			node = node->rb_left;
-		else if (thread->pid < atoms->thread->pid)
-			node = node->rb_right;
-		else {
-			return atoms;
-		}
-	}
-	return NULL;
-}
-
 struct sort_dimension {
 	const char		*name;
-	sort_thread_lat		cmp;
+	sort_fn_t		cmp;
 	struct list_head	list;
 };
 
 static LIST_HEAD(cmp_pid);
 
 static int
-thread_lat_cmp(struct list_head *list, struct task_atoms *l,
-	       struct task_atoms *r)
+thread_lat_cmp(struct list_head *list, struct task_atoms *l, struct task_atoms *r)
 {
 	struct sort_dimension *sort;
 	int ret = 0;
 
+	BUG_ON(list_empty(list));
+
 	list_for_each_entry(sort, list, list) {
 		ret = sort->cmp(l, r);
 		if (ret)
@@ -913,6 +894,32 @@ thread_lat_cmp(struct list_head *list, struct task_atoms *l,
 	return ret;
 }
 
+static struct task_atoms *
+thread_atoms_search(struct rb_root *root, struct thread *thread,
+			 struct list_head *sort_list)
+{
+	struct rb_node *node = root->rb_node;
+	struct task_atoms key = { .thread = thread };
+
+	while (node) {
+		struct task_atoms *atoms;
+		int cmp;
+
+		atoms = container_of(node, struct task_atoms, node);
+
+		cmp = thread_lat_cmp(sort_list, &key, atoms);
+		if (cmp > 0)
+			node = node->rb_left;
+		else if (cmp < 0)
+			node = node->rb_right;
+		else {
+			BUG_ON(thread != atoms->thread);
+			return atoms;
+		}
+	}
+	return NULL;
+}
+
 static void
 __thread_latency_insert(struct rb_root *root, struct task_atoms *data,
 			 struct list_head *sort_list)
@@ -1049,18 +1056,18 @@ latency_switch_event(struct trace_switch_event *switch_event,
 	sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
 	sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);
 
-	in_atoms = thread_atoms_search(&atom_root, sched_in);
+	in_atoms = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
 	if (!in_atoms) {
 		thread_atoms_insert(sched_in);
-		in_atoms = thread_atoms_search(&atom_root, sched_in);
+		in_atoms = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
 		if (!in_atoms)
 			die("in-atom: Internal tree error");
 	}
 
-	out_atoms = thread_atoms_search(&atom_root, sched_out);
+	out_atoms = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
 	if (!out_atoms) {
 		thread_atoms_insert(sched_out);
-		out_atoms = thread_atoms_search(&atom_root, sched_out);
+		out_atoms = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
 		if (!out_atoms)
 			die("out-atom: Internal tree error");
 	}
@@ -1085,7 +1092,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
 		return;
 
 	wakee = threads__findnew(wakeup_event->pid, &threads, &last_match);
-	atoms = thread_atoms_search(&atom_root, wakee);
+	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
 	if (!atoms) {
 		thread_atoms_insert(wakee);
 		return;
@@ -1136,7 +1143,6 @@ static void output_lat_thread(struct task_atoms *atom_list)
 
 static int pid_cmp(struct task_atoms *l, struct task_atoms *r)
 {
-
 	if (l->thread->pid < r->thread->pid)
 		return -1;
 	if (l->thread->pid > r->thread->pid)
@@ -1146,8 +1152,8 @@ static int pid_cmp(struct task_atoms *l, struct task_atoms *r)
 }
 
 static struct sort_dimension pid_sort_dimension = {
-	.name = "pid",
-	.cmp = pid_cmp,
+	.name			= "pid",
+	.cmp			= pid_cmp,
 };
 
 static int avg_cmp(struct task_atoms *l, struct task_atoms *r)
@@ -1172,8 +1178,8 @@ static int avg_cmp(struct task_atoms *l, struct task_atoms *r)
 }
 
 static struct sort_dimension avg_sort_dimension = {
-	.name 	= "avg",
-	.cmp	= avg_cmp,
+	.name			= "avg",
+	.cmp			= avg_cmp,
 };
 
 static int max_cmp(struct task_atoms *l, struct task_atoms *r)
@@ -1187,8 +1193,8 @@ static int max_cmp(struct task_atoms *l, struct task_atoms *r)
 }
 
 static struct sort_dimension max_sort_dimension = {
-	.name 	= "max",
-	.cmp	= max_cmp,
+	.name			= "max",
+	.cmp			= max_cmp,
 };
 
 static int switch_cmp(struct task_atoms *l, struct task_atoms *r)
@@ -1202,8 +1208,8 @@ static int switch_cmp(struct task_atoms *l, struct task_atoms *r)
 }
 
 static struct sort_dimension switch_sort_dimension = {
-	.name 	= "switch",
-	.cmp	= switch_cmp,
+	.name			= "switch",
+	.cmp			= switch_cmp,
 };
 
 static int runtime_cmp(struct task_atoms *l, struct task_atoms *r)
@@ -1217,8 +1223,8 @@ static int runtime_cmp(struct task_atoms *l, struct task_atoms *r)
 }
 
 static struct sort_dimension runtime_sort_dimension = {
-	.name 	= "runtime",
-	.cmp	= runtime_cmp,
+	.name			= "runtime",
+	.cmp			= runtime_cmp,
 };
 
 static struct sort_dimension *available_sorts[] = {
@@ -1666,8 +1672,8 @@ int cmd_sched(int argc, const char **argv, const char *prefix __used)
 			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
 			if (argc)
 				usage_with_options(latency_usage, latency_options);
-			setup_sorting();
 		}
+		setup_sorting();
 		__cmd_lat();
 	} else if (!strncmp(argv[0], "rep", 3)) {
 		trace_handler = &replay_ops;
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 634f280..665d1f3 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -4,10 +4,10 @@
 #include "symbol.h"
 
 struct thread {
-	struct rb_node	 rb_node;
-	struct list_head maps;
-	pid_t		 pid;
-	char		 *comm;
+	struct rb_node		rb_node;
+	struct list_head	maps;
+	pid_t			pid;
+	char			*comm;
 };
 
 int thread__set_comm(struct thread *self, const char *comm);
--