From: Steven Rostedt

The function trace_clock_local() uses sched_clock() to take the time stamp.
For some archs this is not the most efficient method. Making the
trace_clock_local() and trace_normalize_local() functions weak allows
archs to override their definitions.

This patch also removes some "notrace" annotations from trace_clock.c,
since the entire trace directory is compiled with the -pg option removed.

Signed-off-by: Steven Rostedt
---
 kernel/trace/trace_clock.c |    8 ++++----
 1 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 168bf59..2b21f61 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -28,7 +28,7 @@
  * Useful for tracing that does not cross to other CPUs nor
  * does it go through idle events.
  */
-u64 notrace trace_clock_local(void)
+u64 __weak trace_clock_local(void)
 {
 	u64 clock;
 	int resched;
@@ -52,7 +52,7 @@ u64 notrace trace_clock_local(void)
  *
  * Normalize the trace_clock_local value.
  */
-void notrace trace_normalize_local(int cpu, u64 *ts)
+void __weak trace_normalize_local(int cpu, u64 *ts)
 {
 	/* nop */
 }
@@ -65,7 +65,7 @@ void notrace trace_normalize_local(int cpu, u64 *ts)
  * jitter between CPUs. So it's a pretty scalable clock, but there
  * can be offsets in the trace data.
  */
-u64 notrace trace_clock(void)
+u64 trace_clock(void)
 {
 	return cpu_clock(raw_smp_processor_id());
 }
@@ -89,7 +89,7 @@ static struct {
 	.lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED,
 };
 
-u64 notrace trace_clock_global(void)
+u64 trace_clock_global(void)
 {
 	unsigned long flags;
 	int this_cpu;
-- 
1.6.5
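
For illustration only, here is a minimal sketch of what an arch override could
look like once these symbols are weak: the linker prefers an architecture's
strong definitions over the generic __weak ones above. The helpers
arch_fast_cycles() and arch_cycles_to_ns() are made-up placeholders for
whatever cheap counter the arch actually provides, not existing kernel APIs.

	#include <linux/types.h>

	/*
	 * Hypothetical arch/<arch>/kernel/trace_clock.c: these non-weak
	 * definitions take precedence over the __weak ones in
	 * kernel/trace/trace_clock.c at link time.
	 */
	u64 trace_clock_local(void)
	{
		/* read a cheap, per-CPU monotonic cycle counter */
		return arch_fast_cycles();
	}

	void trace_normalize_local(int cpu, u64 *ts)
	{
		/* convert the raw cycle count to nanoseconds for this cpu */
		*ts = arch_cycles_to_ns(cpu, *ts);
	}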