Message-ID: <20070621204234.GA1510@elte.hu>
Date: Thu, 21 Jun 2007 22:42:34 +0200
From: Ingo Molnar <mingo@...e.hu>
To: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Eric Dumazet <dada1@...mosbay.com>,
Chuck Ebbert <cebbert@...hat.com>,
Jarek Poplawski <jarkao2@...pl>,
Miklos Szeredi <miklos@...redi.hu>, chris@...ee.ca,
linux-kernel@...r.kernel.org, tglx@...utronix.de,
akpm@...ux-foundation.org
Subject: [patch] spinlock debug: make looping nicer
* Linus Torvalds <torvalds@...ux-foundation.org> wrote:
> Anybody who ever waits for a lock by busy-looping over it is BUGGY,
> dammit!
btw., back then we also tried a spin_is_locked() based inner loop, but it
didn't help the ->tree_lock lockups either. In any case I very much agree
that the 'nicer' looping should be added again - the patch below does
that. (build and boot tested)
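(For reference, the 'nicer' loop is basically test-and-test-and-set: poll
the lock word read-only and only attempt the cacheline-dirtying atomic
operation once the lock looks free. A minimal user-space sketch of the
idea - C11 atomics and made-up sketch_* names rather than the kernel
primitives:)

/*
 * User-space sketch of the "nicer" spin loop (test-and-test-and-set),
 * using C11 atomics instead of the kernel's raw lock primitives. The
 * sketch_* names are invented for illustration only:
 */
#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
	atomic_bool locked;
} sketch_spinlock_t;

static void sketch_spin_lock(sketch_spinlock_t *lock)
{
	for (;;) {
		/* read-only polling: no cacheline ping-pong while it's held */
		while (atomic_load_explicit(&lock->locked, memory_order_relaxed))
			;	/* cpu_relax()/pause would go here in real code */

		/* the lock looks free - now try the expensive atomic op */
		if (!atomic_exchange_explicit(&lock->locked, true,
					      memory_order_acquire))
			return;
	}
}

static void sketch_spin_unlock(sketch_spinlock_t *lock)
{
	atomic_store_explicit(&lock->locked, false, memory_order_release);
}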
The reason this didn't help the ->tree_lock lockup is likely the same
reason why wait_task_inactive() broke _independently_ of the 'niceness'
of the spin-lock operation: there were too few instructions between
releasing the lock and re-acquiring it, which can cause permanent
starvation of another CPU. No amount of logic on the spinning side can
overcome this if acquire/release critical sections follow each other too
quickly.
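To see that effect outside the kernel, here is a hypothetical user-space
sketch (pthread spinlocks and made-up names, not the kernel primitives;
how badly the second thread starves depends entirely on the hardware):

/*
 * Hypothetical user-space illustration (not kernel code): one thread
 * releases and immediately re-acquires a spinlock in a tight loop, the
 * main thread counts how many trylock attempts it needs before it ever
 * wins the cacheline race. Build with: cc -O2 -pthread starve.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_spinlock_t lock;
static atomic_int stop;

static void *owner(void *arg)
{
	(void)arg;
	/* almost no instructions between the unlock and the next lock */
	while (!atomic_load(&stop)) {
		pthread_spin_lock(&lock);
		pthread_spin_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;
	unsigned long attempts = 0;

	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	pthread_create(&t, NULL, owner, NULL);

	/* the "victim": keep trying until the lock is finally ours */
	while (pthread_spin_trylock(&lock) != 0)
		attempts++;
	pthread_spin_unlock(&lock);

	atomic_store(&stop, 1);
	pthread_join(t, NULL);
	printf("victim needed %lu trylock attempts\n", attempts);
	return 0;
}

Depending on the machine, the victim may get in almost immediately or
only after an enormous number of attempts - fairness here is decided by
the cache-coherency hardware, not by anything the spinning side does.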
Ingo
------------------------------>
Subject: [patch] spinlock debug: make looping nicer
From: Ingo Molnar <mingo@...e.hu>
make the spin-trylock loops nicer - and reactivate the read and
write lock loops as well.
Signed-off-by: Ingo Molnar <mingo@...e.hu>
---
lib/spinlock_debug.c | 21 ++++++++++++---------
1 file changed, 12 insertions(+), 9 deletions(-)
Index: linux/lib/spinlock_debug.c
===================================================================
--- linux.orig/lib/spinlock_debug.c
+++ linux/lib/spinlock_debug.c
@@ -106,9 +106,14 @@ static void __spin_lock_debug(spinlock_t
 
 	for (;;) {
 		for (i = 0; i < loops; i++) {
+			/*
+			 * Ugly: we do the __delay() so that we know how
+			 * long to loop before printing a debug message:
+			 */
+			while (spin_is_locked(lock))
+				__delay(1);
 			if (__raw_spin_trylock(&lock->raw_lock))
 				return;
-			__delay(1);
 		}
 		/* lockup suspected: */
 		if (print_once) {
@@ -167,7 +172,6 @@ static void rwlock_bug(rwlock_t *lock, c
 
 #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
 
-#if 0		/* __write_lock_debug() can lock up - maybe this can too? */
 static void __read_lock_debug(rwlock_t *lock)
 {
 	u64 i;
@@ -176,9 +180,10 @@ static void __read_lock_debug(rwlock_t *
 
 	for (;;) {
 		for (i = 0; i < loops; i++) {
+			while (!read_can_lock(lock))
+				__delay(1);
 			if (__raw_read_trylock(&lock->raw_lock))
 				return;
-			__delay(1);
 		}
 		/* lockup suspected: */
 		if (print_once) {
@@ -191,12 +196,11 @@ static void __read_lock_debug(rwlock_t *
 		}
 	}
 }
-#endif
 
 void _raw_read_lock(rwlock_t *lock)
 {
 	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
-	__raw_read_lock(&lock->raw_lock);
+	__read_lock_debug(lock);
 }
 
 int _raw_read_trylock(rwlock_t *lock)
@@ -242,7 +246,6 @@ static inline void debug_write_unlock(rw
 	lock->owner_cpu = -1;
 }
 
-#if 0		/* This can cause lockups */
 static void __write_lock_debug(rwlock_t *lock)
 {
 	u64 i;
@@ -251,9 +254,10 @@ static void __write_lock_debug(rwlock_t
 
 	for (;;) {
 		for (i = 0; i < loops; i++) {
+			while (!write_can_lock(lock))
+				__delay(1);
 			if (__raw_write_trylock(&lock->raw_lock))
 				return;
-			__delay(1);
 		}
 		/* lockup suspected: */
 		if (print_once) {
@@ -266,12 +270,11 @@ static void __write_lock_debug(rwlock_t
 		}
 	}
 }
-#endif
 
 void _raw_write_lock(rwlock_t *lock)
 {
 	debug_write_lock_before(lock);
-	__raw_write_lock(&lock->raw_lock);
+	__write_lock_debug(lock);
 	debug_write_lock_after(lock);
 }
 