lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:	Fri, 22 Jan 2010 02:16:18 +0100
From:	Frederic Weisbecker <fweisbec@...il.com>
To:	Ingo Molnar <mingo@...e.hu>
Cc:	LKML <linux-kernel@...r.kernel.org>,
	Frederic Weisbecker <fweisbec@...il.com>,
	Steven Rostedt <rostedt@...dmis.org>,
	Li Zefan <lizf@...fujitsu.com>,
	Lai Jiangshan <laijs@...fujitsu.com>
Subject: [RFC PATCH 06/10] ftrace: Release the function hlist if we don't need it anymore

After we disable the function profiler, the function hashlist
stays in memory. This is wasteful, since nobody needs it anymore
until the next use, if any.

Release it when we disable the function profiler, instead of
keeping it around and resetting it on the next use.

Signed-off-by: Frederic Weisbecker <fweisbec@...il.com>
Cc: Steven Rostedt <rostedt@...dmis.org>
Cc: Li Zefan <lizf@...fujitsu.com>
Cc: Lai Jiangshan <laijs@...fujitsu.com>
---
 kernel/trace/ftrace.c          |    1 +
 kernel/trace/functions_hlist.c |   61 +++++++++++++++++-----------------------
 kernel/trace/functions_hlist.h |    1 +
 3 files changed, 28 insertions(+), 35 deletions(-)

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index dfd8f7c..0ded01c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -509,6 +509,7 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
 			 * so this acts like an synchronize_sched.
 			 */
 			unregister_ftrace_profiler();
+			function_hlist_release();
 		}
 	}
  out:
diff --git a/kernel/trace/functions_hlist.c b/kernel/trace/functions_hlist.c
index 37804c4..c79c4c5 100644
--- a/kernel/trace/functions_hlist.c
+++ b/kernel/trace/functions_hlist.c
@@ -21,20 +21,23 @@ DEFINE_PER_CPU(struct func_hlist, func_hlist_cpu);
 
 int functions_hash_bits __read_mostly;
 
-static void function_hlist_reset(struct func_hlist *hlist)
+static void __function_hlist_release(struct func_hlist *hlist)
 {
-	struct func_hlist_page *pg;
-
-	pg = hlist->pages = hlist->start;
+	struct func_hlist_page *pg = hlist->start;
 
 	while (pg) {
-		memset(pg->records, 0, FUNCTIONS_RECORDS_SIZE);
-		pg->index = 0;
+		unsigned long tmp = (unsigned long)pg;
+
 		pg = pg->next;
+		free_page(tmp);
 	}
 
-	memset(hlist->hash, 0,
-	       FUNCTIONS_HLIST_SIZE * sizeof(struct hlist_head));
+	free_page((unsigned long)hlist->pages);
+	hlist->pages = NULL;
+	hlist->start = NULL;
+
+	kfree(hlist->hash);
+	hlist->hash = NULL;
 }
 
 static int function_hlist_pages_init(struct func_hlist *hlist)
@@ -44,10 +47,6 @@ static int function_hlist_pages_init(struct func_hlist *hlist)
 	int pages;
 	int i;
 
-	/* If we already allocated, do nothing */
-	if (hlist->pages)
-		return 0;
-
 	hlist->pages = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!hlist->pages)
 		return -ENOMEM;
@@ -72,26 +71,11 @@ static int function_hlist_pages_init(struct func_hlist *hlist)
 	for (i = 0; i < pages; i++) {
 		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
 		if (!pg->next)
-			goto out_free;
+			return -ENOMEM;
 		pg = pg->next;
 	}
 
 	return 0;
-
- out_free:
-	pg = hlist->start;
-	while (pg) {
-		unsigned long tmp = (unsigned long)pg;
-
-		pg = pg->next;
-		free_page(tmp);
-	}
-
-	free_page((unsigned long)hlist->pages);
-	hlist->pages = NULL;
-	hlist->start = NULL;
-
-	return -ENOMEM;
 }
 
 static int function_hlist_init_cpu(int cpu)
@@ -101,11 +85,8 @@ static int function_hlist_init_cpu(int cpu)
 
 	hlist = &per_cpu(func_hlist_cpu, cpu);
 
-	if (hlist->hash) {
-		/* If the profile is already created, simply reset it */
-		function_hlist_reset(hlist);
-		return 0;
-	}
+	if (WARN_ON_ONCE(hlist->hash))
+		return -EBUSY;
 
 	/*
 	 * We are profiling all functions, but usually only a few thousand
@@ -127,14 +108,24 @@ static int function_hlist_init_cpu(int cpu)
 
 	/* Preallocate the function profiling pages */
 	if (function_hlist_pages_init(hlist) < 0) {
-		kfree(hlist->hash);
-		hlist->hash = NULL;
+		__function_hlist_release(hlist);
 		return -ENOMEM;
 	}
 
 	return 0;
 }
 
+void function_hlist_release(void)
+{
+	int cpu;
+	struct func_hlist *hlist;
+
+	for_each_online_cpu(cpu) {
+		hlist = &per_cpu(func_hlist_cpu, cpu);
+		__function_hlist_release(hlist);
+	}
+}
+
 int function_hlist_init(void)
 {
 	int cpu;
diff --git a/kernel/trace/functions_hlist.h b/kernel/trace/functions_hlist.h
index 3f4e485..8001f95 100644
--- a/kernel/trace/functions_hlist.h
+++ b/kernel/trace/functions_hlist.h
@@ -36,3 +36,4 @@ struct func_node *
 function_hlist_record_alloc(struct func_hlist *hlist, unsigned long ip);
 
 int function_hlist_init(void);
+void function_hlist_release(void);
-- 
1.6.2.3

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ