From: Steven Rostedt

When doing intense tracing, the kmalloc inside trace_marker can
introduce side effects to what is being traced.

It is best to simply use a static buffer and grab a mutex to write
into it. This keeps the impact of using trace_marker() to a minimum.

Suggested-by: Thomas Gleixner
Signed-off-by: Steven Rostedt
---
 kernel/trace/trace.c |   33 +++++++++++++++++----------------
 1 files changed, 17 insertions(+), 16 deletions(-)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4b8df0d..e463125 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1442,15 +1442,19 @@ static void __trace_userstack(struct trace_array *tr, unsigned long flags)
 
 #endif /* CONFIG_STACKTRACE */
 
+static char trace_buf[TRACE_BUF_SIZE];
+static char trace_ubuf[TRACE_BUF_SIZE];
+
+static arch_spinlock_t trace_buf_lock =
+	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+static DEFINE_MUTEX(trace_ubuf_mutex);
+
 /**
  * trace_vbprintk - write binary msg to tracing buffer
  *
  */
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
-	static arch_spinlock_t trace_buf_lock =
-		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
-	static u32 trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_bprint;
 	struct ring_buffer_event *event;
@@ -1480,7 +1484,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	/* Lockdep uses trace_printk for lock tracing */
 	local_irq_save(flags);
 	arch_spin_lock(&trace_buf_lock);
-	len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
+	len = vbin_printf((u32 *)trace_buf, TRACE_BUF_SIZE/sizeof(int), fmt, args);
 
 	if (len > TRACE_BUF_SIZE || len < 0)
 		goto out_unlock;
@@ -1532,9 +1536,6 @@ int trace_array_printk(struct trace_array *tr,
 int trace_array_vprintk(struct trace_array *tr,
 			unsigned long ip, const char *fmt, va_list args)
 {
-	static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
-	static char trace_buf[TRACE_BUF_SIZE];
-
 	struct ftrace_event_call *call = &event_print;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
@@ -3633,23 +3634,22 @@ static ssize_t
 tracing_mark_write(struct file *filp, const char __user *ubuf,
 					size_t cnt, loff_t *fpos)
 {
-	char *buf;
 	size_t written;
+	char *buf = trace_ubuf;
 
 	if (tracing_disabled)
 		return -EINVAL;
 
-	if (cnt > TRACE_BUF_SIZE)
-		cnt = TRACE_BUF_SIZE;
+	if (cnt >= TRACE_BUF_SIZE)
+		cnt = TRACE_BUF_SIZE - 2; /* \n\0 */
 
-	buf = kmalloc(cnt + 2, GFP_KERNEL);
-	if (buf == NULL)
-		return -ENOMEM;
+	mutex_lock(&trace_ubuf_mutex);
 
 	if (copy_from_user(buf, ubuf, cnt)) {
-		kfree(buf);
-		return -EFAULT;
+		written = -EFAULT;
+		goto out;
 	}
+
 	if (buf[cnt-1] != '\n') {
 		buf[cnt] = '\n';
 		buf[cnt+1] = '\0';
@@ -3657,12 +3657,13 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 		buf[cnt] = '\0';
 
 	written = mark_printk("%s", buf);
-	kfree(buf);
 	*fpos += written;
 
 	/* don't tell userspace we wrote more - it might confuse them */
 	if (written > cnt)
 		written = cnt;
 
+ out:
+	mutex_unlock(&trace_ubuf_mutex);
 	return written;
 }
-- 
1.7.5.4
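
For anyone who wants to exercise the tracing_mark_write() path above from
user space, a marker is injected simply by writing a string into the
trace_marker file. A minimal sketch of such a writer follows; it assumes
debugfs is mounted at /sys/kernel/debug, which is the usual location for
the tracing files:

/* write one marker string into the ftrace ring buffer */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/tracing/trace_marker";
	const char msg[] = "hello from user space";
	int fd;

	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open trace_marker");
		return 1;
	}

	/* each write() becomes one marker entry in the trace output */
	if (write(fd, msg, strlen(msg)) < 0)
		perror("write trace_marker");

	close(fd);
	return 0;
}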