Message-ID: <alpine.LFD.2.00.0908112005190.1283@localhost.localdomain>
Date: Tue, 11 Aug 2009 20:09:06 +0200 (CEST)
From: Thomas Gleixner <tglx@...utronix.de>
To: Will Schmidt <will_schmidt@...t.ibm.com>
cc: LKML <linux-kernel@...r.kernel.org>,
rt-users <linux-rt-users@...r.kernel.org>,
Ingo Molnar <mingo@...e.hu>,
Steven Rostedt <rostedt@...dmis.org>,
Peter Zijlstra <peterz@...radead.org>,
Carsten Emde <ce@...g.ch>,
Clark Williams <williams@...hat.com>,
Frank Rowand <frank.rowand@...sony.com>,
Robin Gareus <robin@...eus.org>,
Gregory Haskins <ghaskins@...ell.com>,
Philippe Reynes <philippe.reynes@...smpp.fr>,
Fernando Lopez-Lezcano <nando@...ma.Stanford.EDU>,
Darren Hart <dvhltc@...ibm.com>, Jan Blunck <jblunck@...e.de>,
Sven-Thorsten Dietrich <sdietrich@...ell.com>,
Jon Masters <jcm@...hat.com>
Subject: Re: [ANNOUNCE] 2.6.31-rc4-rt1
On Wed, 5 Aug 2009, Will Schmidt wrote:
> /test/willschm/linux-2.6.31-rtx/kernel/spinlock.c:103: error: implicit
> declaration of function ‘_raw_atomic_spin_relax’
Fix below. Thanks,
tglx
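
For context, the cause of the error: BUILD_LOCK_OPS() pastes its first
argument into both the generated lock functions and the raw relax call, so
BUILD_LOCK_OPS(atomic_spin, atomic_spinlock) ends up calling
_raw_atomic_spin_relax(), which is not declared anywhere. A minimal,
stand-alone sketch of the pasting problem and of the prefix/op split used
in the fix (made-up stubs and simplified bodies, not the kernel code):

/*
 * Illustration only: shows the token-pasting issue, not the kernel macro.
 */
#include <stdio.h>

/* Only the "spin" flavour of the raw relax helper exists here,
 * mirroring what the build error suggests about the tree. */
static void _raw_spin_relax(void) { puts("_raw_spin_relax()"); }

/* Old form: one token is pasted into both the generated function
 * name and the raw helper it calls. */
#define BUILD_LOCK_OPS_OLD(op) \
	static void _##op##_lock(void) { _raw_##op##_relax(); }

/* New form: 'prefix' names the generated function, 'op' selects the
 * raw primitive, so the two no longer have to match. */
#define BUILD_LOCK_OPS_NEW(prefix, op) \
	static void _##prefix##_lock(void) { _raw_##op##_relax(); }

/*
 * BUILD_LOCK_OPS_OLD(atomic_spin) would expand to a call to
 * _raw_atomic_spin_relax(), which is undeclared here -> the reported
 * "implicit declaration" error.
 */
BUILD_LOCK_OPS_NEW(atomic_spin, spin)	/* calls _raw_spin_relax() */

int main(void)
{
	_atomic_spin_lock();
	return 0;
}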
-------
commit ffc969930727238b847176c203bdbe1f9dffe403
Author: Thomas Gleixner <tglx@...utronix.de>
Date: Tue Aug 11 20:03:47 2009 +0200
locks: Fix PREEMPT=y, LOCKBREAK=y, DEBUG_LOCK_ALLOC=n compile
Should be folded back into the atomic lock conversion
Reported-by: Will Schmidt <will_schmidt@...t.ibm.com>
Signed-off-by: Thomas Gleixner <tglx@...utronix.de>
diff --git a/kernel/lock-internals.h b/kernel/lock-internals.h
index 4f0bc8b..76f694c 100644
--- a/kernel/lock-internals.h
+++ b/kernel/lock-internals.h
@@ -9,8 +9,8 @@
* (We do this in a function because inlining it would be excessive.)
*/
-#define BUILD_LOCK_OPS(op, locktype) \
-void __lockfunc _##op##_lock(locktype##_t *lock) \
+#define BUILD_LOCK_OPS(prefix, op, locktype) \
+void __lockfunc _##prefix##_lock(locktype##_t *lock) \
{ \
for (;;) { \
preempt_disable(); \
@@ -20,15 +20,15 @@ void __lockfunc _##op##_lock(locktype##_t *lock) \
\
if (!(lock)->break_lock) \
(lock)->break_lock = 1; \
- while (!op##_can_lock(lock) && (lock)->break_lock) \
+ while (!prefix##_can_lock(lock) && (lock)->break_lock) \
_raw_##op##_relax(&lock->raw_lock); \
} \
(lock)->break_lock = 0; \
} \
\
-EXPORT_SYMBOL(_##op##_lock); \
+EXPORT_SYMBOL(_##prefix##_lock); \
\
-unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
+unsigned long __lockfunc _##prefix##_lock_irqsave(locktype##_t *lock) \
{ \
unsigned long flags; \
\
@@ -42,23 +42,23 @@ unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
\
if (!(lock)->break_lock) \
(lock)->break_lock = 1; \
- while (!op##_can_lock(lock) && (lock)->break_lock) \
+ while (!prefix##_can_lock(lock) && (lock)->break_lock) \
_raw_##op##_relax(&lock->raw_lock); \
} \
(lock)->break_lock = 0; \
return flags; \
} \
\
-EXPORT_SYMBOL(_##op##_lock_irqsave); \
+EXPORT_SYMBOL(_##prefix##_lock_irqsave); \
\
-void __lockfunc _##op##_lock_irq(locktype##_t *lock) \
+void __lockfunc _##prefix##_lock_irq(locktype##_t *lock) \
{ \
- _##op##_lock_irqsave(lock); \
+ _##prefix##_lock_irqsave(lock); \
} \
\
-EXPORT_SYMBOL(_##op##_lock_irq); \
+EXPORT_SYMBOL(_##prefix##_lock_irq); \
\
-void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
+void __lockfunc _##prefix##_lock_bh(locktype##_t *lock) \
{ \
unsigned long flags; \
\
@@ -67,9 +67,9 @@ void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
/* irq-disabling. We use the generic preemption-aware */ \
/* function: */ \
/**/ \
- flags = _##op##_lock_irqsave(lock); \
+ flags = _##prefix##_lock_irqsave(lock); \
local_bh_disable(); \
local_irq_restore(flags); \
} \
\
-EXPORT_SYMBOL(_##op##_lock_bh)
+EXPORT_SYMBOL(_##prefix##_lock_bh)
diff --git a/kernel/rwlock.c b/kernel/rwlock.c
index 35460b3..eaf34c8 100644
--- a/kernel/rwlock.c
+++ b/kernel/rwlock.c
@@ -146,8 +146,8 @@ EXPORT_SYMBOL(_write_lock);
* _[read|write]_lock_irqsave()
* _[read|write]_lock_bh()
*/
-BUILD_LOCK_OPS(read, rwlock);
-BUILD_LOCK_OPS(write, rwlock);
+BUILD_LOCK_OPS(read, read, rwlock);
+BUILD_LOCK_OPS(write, write, rwlock);
#endif /* CONFIG_PREEMPT */
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 6a3c0c4..e3194d5 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -100,7 +100,7 @@ EXPORT_SYMBOL(_atomic_spin_lock);
* _atomic_spin_lock_irqsave()
* _atomic_spin_lock_bh()
*/
-BUILD_LOCK_OPS(atomic_spin, atomic_spinlock);
+BUILD_LOCK_OPS(atomic_spin, spin, atomic_spinlock);
#endif /* CONFIG_PREEMPT */
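
With the extra argument, the rwlock.c expansions are unchanged (prefix and
op are identical there), while the atomic_spin case keeps its exported
_atomic_spin_* names and now relaxes via _raw_spin_relax() instead of the
undeclared _raw_atomic_spin_relax(). A stand-alone sketch of the spin-wait
loop the spinlock.c invocation now generates (stub types and helpers for
illustration, not the kernel code):

#include <stdio.h>

/* Stubs standing in for the kernel types and primitives. */
typedef struct { int raw_lock; int break_lock; } atomic_spinlock_t;

static int atomic_spin_can_lock(atomic_spinlock_t *lock)
{
	return lock->raw_lock == 0;
}

static void _raw_spin_relax(int *raw_lock)
{
	(void)raw_lock;		/* placeholder for the real relax primitive */
}

/*
 * What the break_lock loop in _atomic_spin_lock() pastes to with
 * BUILD_LOCK_OPS(atomic_spin, spin, atomic_spinlock):
 *   prefix##_can_lock -> atomic_spin_can_lock()
 *   _raw_##op##_relax -> _raw_spin_relax()   (declared)
 * instead of the old _raw_atomic_spin_relax() (undeclared).
 */
static void spin_wait(atomic_spinlock_t *lock)
{
	if (!(lock)->break_lock)
		(lock)->break_lock = 1;
	while (!atomic_spin_can_lock(lock) && (lock)->break_lock)
		_raw_spin_relax(&lock->raw_lock);
	(lock)->break_lock = 0;
}

int main(void)
{
	atomic_spinlock_t lock = { 0, 0 };
	spin_wait(&lock);
	puts("generated loop resolves to _raw_spin_relax()");
	return 0;
}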