Date:	Tue, 02 Feb 2010 10:59:18 -0500
From:	Steven Rostedt <rostedt@...dmis.org>
To:	LKML <linux-kernel@...r.kernel.org>
Cc:	Ingo Molnar <mingo@...e.hu>, Lai Jiangshan <laijs@...fujitsu.com>,
	Li Zefan <lizf@...fujitsu.com>
Subject: [PATCH][GIT PULL][v2.6.33] tracing: Fix circular dead lock in
 stack trace


Ingo,

Please pull the latest tip/tracing/urgent tree, which can be found at:

  git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace.git
tip/tracing/urgent


Lai Jiangshan (1):
      tracing: Fix circular dead lock in stack trace

----
 kernel/trace/trace_stack.c |   24 ++++++++++++++++++++++++
 1 files changed, 24 insertions(+), 0 deletions(-)
---------------------------
commit 4f48f8b7fd18c44f8478174f9925cc3c059c6ce4
Author: Lai Jiangshan <laijs@...fujitsu.com>
Date:   Tue Feb 2 15:32:09 2010 +0800

    tracing: Fix circular dead lock in stack trace
    
    When we cat <debugfs>/tracing/stack_trace, we may cause a circular lock:
    sys_read()
      t_start()
         arch_spin_lock(&max_stack_lock);
    
      t_show()
         seq_printf(), vsnprintf() .... /* these are all traceable;
           when they are traced, max_stack_lock may be required again. */
    
    The following script can trigger this circular deadlock very easily:
    #!/bin/bash
    
    echo 1 > /proc/sys/kernel/stack_tracer_enabled
    
    mount -t debugfs xxx /mnt > /dev/null 2>&1
    
    (
    # make check_stack() eager to take max_stack_lock
    for ((; ;))
    {
    	echo 1 > /mnt/tracing/stack_max_size
    }
    ) &
    
    for ((; ;))
    {
    	cat /mnt/tracing/stack_trace > /dev/null
    }
    
    To fix this bug, we increase the percpu trace_active before taking
    the lock (see the sketch after this commit message).
    
    Reported-by: Li Zefan <lizf@...fujitsu.com>
    Signed-off-by: Lai Jiangshan <laijs@...fujitsu.com>
    LKML-Reference: <4B67D4F9.9080905@...fujitsu.com>
    Signed-off-by: Steven Rostedt <rostedt@...dmis.org>
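
For context, the reason raising the per-cpu trace_active counter breaks the
recursion is that the stack tracer's ftrace callback already uses that same
counter as a recursion guard and skips check_stack() while it is non-zero.
A simplified sketch of that guard, loosely following kernel/trace/trace_stack.c
of this era (names match the file, but details are omitted and may differ), is:

	/* Sketch only; simplified paraphrase, not the literal kernel source. */
	static DEFINE_PER_CPU(int, trace_active);

	static void stack_trace_call(unsigned long ip, unsigned long parent_ip)
	{
		int cpu;

		preempt_disable_notrace();

		cpu = raw_smp_processor_id();
		/*
		 * No atomics needed: the counter is only modified by this CPU
		 * with preemption disabled.  A non-zero value means we are
		 * recursing, or a reader/writer of the stack_trace files is
		 * currently holding max_stack_lock on this CPU, so back out.
		 */
		if (per_cpu(trace_active, cpu)++ != 0)
			goto out;

		/* may take max_stack_lock when a new maximum stack depth is seen */
		check_stack();

	 out:
		per_cpu(trace_active, cpu)--;
		preempt_enable_notrace();
	}

With the patch below, t_start()/t_stop() and stack_max_size_write() bump the
same counter around their arch_spin_lock()/arch_spin_unlock() sections, so if
seq_printf(), vsnprintf(), or an NMI handler gets traced while max_stack_lock
is held, the callback returns early instead of trying to take the lock a
second time on the same CPU.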

diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 678a512..f4bc9b2 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -157,6 +157,7 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 	unsigned long val, flags;
 	char buf[64];
 	int ret;
+	int cpu;
 
 	if (count >= sizeof(buf))
 		return -EINVAL;
@@ -171,9 +172,20 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 		return ret;
 
 	local_irq_save(flags);
+
+	/*
+	 * In case we trace inside arch_spin_lock() or after (NMI),
+	 * we will cause circular lock, so we also need to increase
+	 * the percpu trace_active here.
+	 */
+	cpu = smp_processor_id();
+	per_cpu(trace_active, cpu)++;
+
 	arch_spin_lock(&max_stack_lock);
 	*ptr = val;
 	arch_spin_unlock(&max_stack_lock);
+
+	per_cpu(trace_active, cpu)--;
 	local_irq_restore(flags);
 
 	return count;
@@ -206,7 +218,13 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
+	int cpu;
+
 	local_irq_disable();
+
+	cpu = smp_processor_id();
+	per_cpu(trace_active, cpu)++;
+
 	arch_spin_lock(&max_stack_lock);
 
 	if (*pos == 0)
@@ -217,7 +235,13 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 
 static void t_stop(struct seq_file *m, void *p)
 {
+	int cpu;
+
 	arch_spin_unlock(&max_stack_lock);
+
+	cpu = smp_processor_id();
+	per_cpu(trace_active, cpu)--;
+
 	local_irq_enable();
 }
 

