Date:	Tue, 31 Mar 2015 10:46:49 -0400
From:	David Ahern <david.ahern@...cle.com>
To:	acme@...nel.org
Cc:	linux-kernel@...r.kernel.org, David Ahern <david.ahern@...cle.com>,
	Ingo Molnar <mingo@...nel.org>
Subject: [PATCH] perf sched: Make cpu based arrays dynamic

Remove MAX_CPUS and the static sizing of the per-cpu arrays in favor of
run-time growth: curr_pid, curr_thread and cpu_last_switched are now
allocated on demand and grown to the next power of two above the highest
CPU number seen in the samples.
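
The pattern is the usual grow-on-demand scheme: keep the current
allocation size next to the pointers, and when a sample references a CPU
at or beyond that size, realloc the arrays up to the next power of two
and initialize the new slots. A minimal, self-contained sketch of that
scheme (grow_u32_array() and roundup_pow_of_two() are illustrative names
for this example only, not part of the patch):

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative helper; the patch uses __roundup_pow_of_two(). */
    static size_t roundup_pow_of_two(size_t n)
    {
            size_t p = 1;

            while (p < n)
                    p <<= 1;
            return p;
    }

    /*
     * Make sure *arr has a slot for index cpu; grow with realloc to
     * the next power of two and mark the new slots as "no pid",
     * mirroring what the patch does for curr_pid.
     */
    static int grow_u32_array(unsigned int **arr, size_t *alloc, size_t cpu)
    {
            unsigned int *p;
            size_t n, i;

            if (cpu < *alloc)
                    return 0;

            n = roundup_pow_of_two(cpu + 1);
            p = realloc(*arr, n * sizeof(*p));
            if (p == NULL)
                    return -1;

            for (i = *alloc; i < n; i++)
                    p[i] = (unsigned int) -1;

            *arr = p;
            *alloc = n;
            return 0;
    }

    int main(void)
    {
            unsigned int *curr_pid = NULL;
            size_t alloc = 0;

            if (grow_u32_array(&curr_pid, &alloc, 255))
                    return 1;
            printf("allocated %zu slots\n", alloc);
            free(curr_pid);
            return 0;
    }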

Signed-off-by: David Ahern <david.ahern@...cle.com>
Cc: Ingo Molnar <mingo@...nel.org>
---
 tools/perf/builtin-sched.c | 84 ++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 73 insertions(+), 11 deletions(-)

diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 858d85396d81..1d111e66b6b3 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -30,7 +30,6 @@
 #include <math.h>
 
 #define PR_SET_NAME		15               /* Set process name */
-#define MAX_CPUS		4096
 #define COMM_LEN		20
 #define SYM_LEN			129
 
@@ -139,8 +138,6 @@ struct perf_sched {
  * weird events, such as a task being switched away that is not current.
  */
 	int		 max_cpu;
-	u32		 curr_pid[MAX_CPUS];
-	struct thread	 *curr_thread[MAX_CPUS];
 	char		 next_shortname1;
 	char		 next_shortname2;
 	unsigned int	 replay_repeat;
@@ -170,10 +167,15 @@ struct perf_sched {
 	u64		 run_avg;
 	u64		 all_runtime;
 	u64		 all_count;
-	u64		 cpu_last_switched[MAX_CPUS];
 	struct rb_root	 atom_root, sorted_atom_root;
 	struct list_head sort_list, cmp_pid;
 
+	/* current allocation size for curr_pid and curr_thread */
+	int		 alloc_cpu;
+	u32		 *curr_pid;
+	struct thread	 **curr_thread;
+	u64		 *cpu_last_switched;
+
 	/* options for timehist command */
 	bool		summary;
 	bool		summary_only;
@@ -733,6 +735,56 @@ replay_wakeup_event(struct perf_sched *sched,
 	return 0;
 }
 
+static int sched__expand_cpu_arrays(struct perf_sched *sched, int cpu)
+{
+	void *p;
+	int i;
+
+	p = realloc(sched->curr_pid, cpu * sizeof(u32));
+	if (p == NULL) {
+		pr_err("Failed to expand curr_pid array\n");
+		return -1;
+	}
+	sched->curr_pid = p;
+
+	p = realloc(sched->curr_thread, cpu * sizeof(struct thread *));
+	if (p == NULL) {
+		pr_err("Failed to expand curr_thread array\n");
+		return -1;
+	}
+	sched->curr_thread = p;
+
+	p = realloc(sched->cpu_last_switched, cpu * sizeof(u64));
+	if (p == NULL) {
+		pr_err("Failed to expand cpu_last_switched array\n");
+		return -1;
+	}
+	sched->cpu_last_switched = p;
+
+	/* initialize new slots */
+	for (i = sched->alloc_cpu; i < cpu; ++i) {
+		sched->cpu_last_switched[i] = (u64) 0;
+		sched->curr_pid[i] = -1;
+		sched->curr_thread[i] = NULL;
+	}
+
+	sched->alloc_cpu = cpu;
+
+	return 0;
+}
+
+static int sched__check_cpu_arrays(struct perf_sched *sched, int cpu)
+{
+	if (sched->alloc_cpu <= cpu) {
+		int n = __roundup_pow_of_two(cpu + 1);
+
+		if (sched__expand_cpu_arrays(sched, n) != 0)
+			return -1;
+	}
+
+	return 0;
+}
+
 static int replay_switch_event(struct perf_sched *sched,
 			       struct perf_evsel *evsel,
 			       struct perf_sample *sample,
@@ -751,9 +803,12 @@ static int replay_switch_event(struct perf_sched *sched,
 	if (verbose)
 		printf("sched_switch event %p\n", evsel);
 
-	if (cpu >= MAX_CPUS || cpu < 0)
+	if (cpu < 0)
 		return 0;
 
+	if (sched__check_cpu_arrays(sched, cpu) != 0)
+		return -1;
+
 	timestamp0 = sched->cpu_last_switched[cpu];
 	if (timestamp0)
 		delta = timestamp - timestamp0;
@@ -983,7 +1038,10 @@ static int latency_switch_event(struct perf_sched *sched,
 	int cpu = sample->cpu;
 	s64 delta;
 
-	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
+	BUG_ON(cpu < 0);
+
+	if (sched__check_cpu_arrays(sched, cpu) != 0)
+		return -1;
 
 	timestamp0 = sched->cpu_last_switched[cpu];
 	sched->cpu_last_switched[cpu] = timestamp;
@@ -1046,7 +1104,7 @@ static int latency_runtime_event(struct perf_sched *sched,
 	u64 timestamp = sample->time;
 	int cpu = sample->cpu;
 
-	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
+	BUG_ON(cpu < 0);
 	if (!atoms) {
 		if (thread_atoms_insert(sched, thread))
 			return -1;
@@ -1335,11 +1393,14 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
 	s64 delta;
 	int cpu, this_cpu = sample->cpu;
 
-	BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);
+	BUG_ON(this_cpu < 0);
 
 	if (this_cpu > sched->max_cpu)
 		sched->max_cpu = this_cpu;
 
+	if (sched__check_cpu_arrays(sched, this_cpu) != 0)
+		return -1;
+
 	timestamp0 = sched->cpu_last_switched[this_cpu];
 	sched->cpu_last_switched[this_cpu] = timestamp;
 	if (timestamp0)
@@ -1417,6 +1478,9 @@ static int process_sched_switch_event(struct perf_tool *tool,
 	u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
 	    next_pid = perf_evsel__intval(evsel, sample, "next_pid");
 
+	if (sched__check_cpu_arrays(sched, this_cpu) != 0)
+		return -1;
+
 	if (sched->curr_pid[this_cpu] != (u32)-1) {
 		/*
 		 * Are we trying to switch away a PID that is
@@ -2983,7 +3047,6 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
 		.switch_event	    = replay_switch_event,
 		.fork_event	    = replay_fork_event,
 	};
-	unsigned int i;
 
 	sched.pid_to_task = intlist__new(NULL);
 	if (sched.pid_to_task == NULL) {
@@ -2991,8 +3054,7 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
 		return -ENOMEM;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++)
-		sched.curr_pid[i] = -1;
+	sched__expand_cpu_arrays(&sched, 16);
 
 	argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
 					sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
-- 
2.3.0

