Date:	Wed, 17 Feb 2010 11:16:09 +0900
From:	Tejun Heo <tj@...nel.org>
To:	Steven Rostedt <rostedt@...dmis.org>,
	Frederic Weisbecker <fweisbec@...il.com>
CC:	Ingo Molnar <mingo@...e.hu>, lkml <linux-kernel@...r.kernel.org>,
	Stephen Rothwell <sfr@...b.auug.org.au>
Subject: [PATCH perf/core] percpu: add __percpu sparse annotations to trace

Add __percpu sparse annotations to trace.

These annotations are to make sparse consider percpu variables to be
in a different address space and warn if accessed without going
through percpu accessors.  This patch doesn't affect normal builds.
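
For reference, a minimal sketch of what the annotation does under sparse.
The macro definition is the one introduced by the mainline commit referenced
below (only active when __CHECKER__ is set, so normal builds see an empty
macro); the example_buf/example_use function is purely illustrative and not
part of this patch:

  /* from include/linux/compiler.h once the prerequisite commit is applied */
  #ifdef __CHECKER__
  # define __percpu	__attribute__((noderef, address_space(3)))
  #else
  # define __percpu
  #endif

  /* illustrative only: sparse flags direct use of an annotated percpu
   * pointer and stays quiet when a percpu accessor is used, because the
   * accessor performs the __force cast out of the percpu address space.
   */
  static char __percpu *example_buf;

  static void example_use(int cpu)
  {
  	char *p;

  	p = example_buf;			/* sparse: incorrect address space */
  	p = per_cpu_ptr(example_buf, cpu);	/* ok: goes through the accessor */
  }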

Signed-off-by: Tejun Heo <tj@...nel.org>
Acked-by: Steven Rostedt <rostedt@...dmis.org>
Cc: Frederic Weisbecker <fweisbec@...il.com>
Cc: Ingo Molnar <mingo@...hat.com>
---
Trace part of the __percpu sparse patches.  This patch needs the following
commit from mainline to define __percpu.

 2938429501b73f6aeb312236eac7ed0416a07cd5

Thanks.

 include/trace/ftrace.h               |    2 +-
 kernel/trace/trace_event_profile.c   |   15 ++++++++-------
 kernel/trace/trace_functions_graph.c |    2 +-
 kernel/trace/trace_ksym.c            |   10 +++++-----
 4 files changed, 15 insertions(+), 14 deletions(-)

Index: linux-2.6-tip/include/trace/ftrace.h
===================================================================
--- linux-2.6-tip.orig/include/trace/ftrace.h
+++ linux-2.6-tip/include/trace/ftrace.h
@@ -773,7 +773,7 @@ __attribute__((section("_ftrace_events")
  *	struct ftrace_event_call *event_call = &event_<call>;
  *	extern void perf_tp_event(int, u64, u64, void *, int);
  *	struct ftrace_raw_##call *entry;
- *	struct perf_trace_buf *trace_buf;
+ *	struct perf_trace_buf __percpu *trace_buf;
  *	u64 __addr = 0, __count = 1;
  *	unsigned long irq_flags;
  *	struct trace_entry *ent;
Index: linux-2.6-tip/kernel/trace/trace_event_profile.c
===================================================================
--- linux-2.6-tip.orig/kernel/trace/trace_event_profile.c
+++ linux-2.6-tip/kernel/trace/trace_event_profile.c
@@ -10,8 +10,8 @@
 #include "trace.h"
 
 
-static char *perf_trace_buf;
-static char *perf_trace_buf_nmi;
+static char __percpu *perf_trace_buf;
+static char __percpu *perf_trace_buf_nmi;
 
 typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ;
 
@@ -20,20 +20,20 @@ static int	total_profile_count;
 
 static int ftrace_profile_enable_event(struct ftrace_event_call *event)
 {
-	char *buf;
+	char __percpu *buf;
 	int ret = -ENOMEM;
 
 	if (event->profile_count++ > 0)
 		return 0;
 
 	if (!total_profile_count) {
-		buf = (char *)alloc_percpu(perf_trace_t);
+		buf = (char __percpu *)alloc_percpu(perf_trace_t);
 		if (!buf)
 			goto fail_buf;
 
 		rcu_assign_pointer(perf_trace_buf, buf);
 
-		buf = (char *)alloc_percpu(perf_trace_t);
+		buf = (char __percpu *)alloc_percpu(perf_trace_t);
 		if (!buf)
 			goto fail_buf_nmi;
 
@@ -79,7 +79,7 @@ int ftrace_profile_enable(int event_id)
 
 static void ftrace_profile_disable_event(struct ftrace_event_call *event)
 {
-	char *buf, *nmi_buf;
+	char __percpu *buf, *nmi_buf;
 
 	if (--event->profile_count > 0)
 		return;
@@ -123,7 +123,8 @@ __kprobes void *ftrace_perf_buf_prepare(
 					int *rctxp, unsigned long *irq_flags)
 {
 	struct trace_entry *entry;
-	char *trace_buf, *raw_data;
+	char __percpu *trace_buf;
+	char *raw_data;
 	int pc, cpu;
 
 	pc = preempt_count();
Index: linux-2.6-tip/kernel/trace/trace_functions_graph.c
===================================================================
--- linux-2.6-tip.orig/kernel/trace/trace_functions_graph.c
+++ linux-2.6-tip/kernel/trace/trace_functions_graph.c
@@ -21,7 +21,7 @@ struct fgraph_cpu_data {
 };
 
 struct fgraph_data {
-	struct fgraph_cpu_data		*cpu_data;
+	struct fgraph_cpu_data __percpu *cpu_data;
 
 	/* Place to preserve last processed entry. */
 	struct ftrace_graph_ent_entry	ent;
Index: linux-2.6-tip/kernel/trace/trace_ksym.c
===================================================================
--- linux-2.6-tip.orig/kernel/trace/trace_ksym.c
+++ linux-2.6-tip/kernel/trace/trace_ksym.c
@@ -42,7 +42,7 @@
 #define KSYM_TRACER_OP_LEN 3 /* rw- */
 
 struct trace_ksym {
-	struct perf_event	**ksym_hbp;
+	struct perf_event	* __percpu *ksym_hbp;
 	struct perf_event_attr	attr;
 #ifdef CONFIG_PROFILE_KSYM_TRACER
 	atomic64_t		counter;
@@ -200,8 +200,8 @@ int process_new_ksym_entry(char *ksymnam
 	entry->ksym_hbp = register_wide_hw_breakpoint(&entry->attr,
 					ksym_hbp_handler);
 
-	if (IS_ERR(entry->ksym_hbp)) {
-		ret = PTR_ERR(entry->ksym_hbp);
+	if (IS_ERR((void __force *)entry->ksym_hbp)) {
+		ret = PTR_ERR((void __force *)entry->ksym_hbp);
 		printk(KERN_INFO "ksym_tracer request failed. Try again"
 					" later!!\n");
 		goto err;
@@ -331,8 +331,8 @@ static ssize_t ksym_trace_filter_write(s
 			entry->ksym_hbp =
 				register_wide_hw_breakpoint(&entry->attr,
 					ksym_hbp_handler);
-			if (IS_ERR(entry->ksym_hbp))
-				ret = PTR_ERR(entry->ksym_hbp);
+			if (IS_ERR((void __force *)entry->ksym_hbp))
+				ret = PTR_ERR((void __force *)entry->ksym_hbp);
 			else
 				goto out_unlock;
 		}
--