Message-ID: <20190107183444.GA5966@xps-13>
Date: Mon, 7 Jan 2019 19:34:44 +0100
From: Andrea Righi <righi.andrea@...il.com>
To: Masami Hiramatsu <mhiramat@...nel.org>
Cc: Ingo Molnar <mingo@...hat.com>, peterz@...radead.org,
Mathieu Desnoyers <mathieu.desnoyers@...icios.com>,
linux-kernel <linux-kernel@...r.kernel.org>,
Steven Rostedt <rostedt@...dmis.org>
Subject: Re: [PATCH 0/2] kprobes: Fix kretprobe incorrect stacking order problem
On Mon, Jan 07, 2019 at 10:31:34PM +0900, Masami Hiramatsu wrote:
...
> BTW, these are not the only issues. To remove CONFIG_KPROBE_EVENTS_ON_NOTRACE
> I'm trying to find other notrace functions which can crash the
> kernel when probed. This is mostly done on x86, so I'll post it
> after this series.
Not sure if you found it already, but it looks like some of the
_raw_spin_lock/unlock* functions (when they're not inlined) cause the
same problem (or something similar). For example, I can deadlock the
system by doing this:
echo "r:event_1 __fdget" >> kprobe_events
echo "r:event_2 _raw_spin_lock_irqsave" >> kprobe_events
echo 1 > events/kprobes/enable
[DEADLOCK]
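
My understanding of the mechanism (an educated guess from reading the
code, so take it with a grain of salt): the kretprobe trampoline itself
takes one of the kretprobe hash table raw spinlocks via
kretprobe_hash_lock(), so a return probe on _raw_spin_lock_irqsave()
makes the trampoline re-enter itself while that lock is already held.
Roughly (names from kernel/kprobes.c and arch/x86/kernel/kprobes/core.c):

  __fdget() returns
    -> trampoline_handler()              /* kretprobe trampoline (event_1)    */
       -> kretprobe_hash_lock()          /* takes kretprobe_table_locks[hash] */
          -> _raw_spin_lock_irqsave()    /* probed by event_2: the lock is
                                            taken, then the return address
                                            is redirected...                  */
             -> trampoline_handler()     /* ...back into the trampoline       */
                -> kretprobe_hash_lock() /* same task => same hash => same
                                            lock, already held => DEADLOCK    */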
Sending the following just in case...
Thanks,
kernel/locking/spinlock.c | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
index 936f3d14dd6b..d93e88019239 100644
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -19,6 +19,7 @@
 #include <linux/preempt.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
+#include <linux/kprobes.h>
 #include <linux/debug_locks.h>
 #include <linux/export.h>
 
@@ -128,6 +129,7 @@ int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
 	return __raw_spin_trylock(lock);
 }
 EXPORT_SYMBOL(_raw_spin_trylock);
+NOKPROBE_SYMBOL(_raw_spin_trylock);
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
@@ -136,6 +138,7 @@ int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)
 	return __raw_spin_trylock_bh(lock);
 }
 EXPORT_SYMBOL(_raw_spin_trylock_bh);
+NOKPROBE_SYMBOL(_raw_spin_trylock_bh);
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_LOCK
@@ -144,6 +147,7 @@ void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
 	__raw_spin_lock(lock);
 }
 EXPORT_SYMBOL(_raw_spin_lock);
+NOKPROBE_SYMBOL(_raw_spin_lock);
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
@@ -152,6 +156,7 @@ unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
 	return __raw_spin_lock_irqsave(lock);
 }
 EXPORT_SYMBOL(_raw_spin_lock_irqsave);
+NOKPROBE_SYMBOL(_raw_spin_lock_irqsave);
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
@@ -160,6 +165,7 @@ void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
 	__raw_spin_lock_irq(lock);
 }
 EXPORT_SYMBOL(_raw_spin_lock_irq);
+NOKPROBE_SYMBOL(_raw_spin_lock_irq);
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_LOCK_BH
@@ -168,6 +174,7 @@ void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
 	__raw_spin_lock_bh(lock);
 }
 EXPORT_SYMBOL(_raw_spin_lock_bh);
+NOKPROBE_SYMBOL(_raw_spin_lock_bh);
 #endif
 
 #ifdef CONFIG_UNINLINE_SPIN_UNLOCK
@@ -176,6 +183,7 @@ void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
 	__raw_spin_unlock(lock);
 }
 EXPORT_SYMBOL(_raw_spin_unlock);
+NOKPROBE_SYMBOL(_raw_spin_unlock);
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
@@ -184,6 +192,7 @@ void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long
 	__raw_spin_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(_raw_spin_unlock_irqrestore);
+NOKPROBE_SYMBOL(_raw_spin_unlock_irqrestore);
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
@@ -192,6 +201,7 @@ void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
 	__raw_spin_unlock_irq(lock);
 }
 EXPORT_SYMBOL(_raw_spin_unlock_irq);
+NOKPROBE_SYMBOL(_raw_spin_unlock_irq);
 #endif
 
 #ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
@@ -200,6 +210,7 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
 	__raw_spin_unlock_bh(lock);
 }
 EXPORT_SYMBOL(_raw_spin_unlock_bh);
+NOKPROBE_SYMBOL(_raw_spin_unlock_bh);
 #endif
 
 #ifndef CONFIG_INLINE_READ_TRYLOCK
Signed-off-by: Andrea Righi <righi.andrea@...il.com>
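
In case it's useful for testing: with this applied I would expect the
helpers to show up in the kprobe blacklist and the reproducer above to
fail with -EINVAL at enable time instead of deadlocking (that is my
expectation from how NOKPROBE_SYMBOL feeds within_kprobe_blacklist();
I have not exercised every combination):

  # grep _raw_spin /sys/kernel/debug/kprobes/blacklist
  # echo "r:event_2 _raw_spin_lock_irqsave" >> kprobe_events
  # echo 1 > events/kprobes/enable   # now expected to fail with -EINVAL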