Message-Id: <8fc6dde58658e38552132ce3f0ddf5d79f166f7c.1289940821.git.jeremy.fitzhardinge@citrix.com>
Date: Tue, 16 Nov 2010 13:08:39 -0800
From: Jeremy Fitzhardinge <jeremy@...p.org>
To: Peter Zijlstra <peterz@...radead.org>
Cc: Linux Kernel Mailing List <linux-kernel@...r.kernel.org>,
Nick Piggin <npiggin@...nel.dk>,
Mathieu Desnoyers <mathieu.desnoyers@...ymtl.ca>,
Américo Wang <xiyou.wangcong@...il.com>,
Eric Dumazet <dada1@...mosbay.com>,
Jan Beulich <JBeulich@...ell.com>, Avi Kivity <avi@...hat.com>,
Xen-devel <xen-devel@...ts.xensource.com>,
"H. Peter Anvin" <hpa@...or.com>,
Linux Virtualization <virtualization@...ts.linux-foundation.org>,
Srivatsa Vaddagiri <vatsa@...ux.vnet.ibm.com>,
Jeremy Fitzhardinge <jeremy.fitzhardinge@...rix.com>
Subject: [PATCH 08/14] x86/ticketlock: collapse a layer of functions

From: Jeremy Fitzhardinge <jeremy.fitzhardinge@...rix.com>

Now that the paravirtualization layer doesn't exist at the spinlock
level any more, we can collapse the __ticket_ functions into the arch_
functions.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@...rix.com>
---
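[Note below the cut, not part of the commit message: for reference while
reviewing, here is a minimal userspace sketch of the ticket-lock scheme
the arch_ functions in this patch implement. It uses GCC __atomic
builtins in place of the kernel's xadd()/cmpxchg() helpers, and the
ticket_lock_t type and function names are invented for the sketch, not
taken from the patch.]

#include <stdbool.h>
#include <stdint.h>

/* Overlay of a whole-word view and a head/tail view, mirroring the
 * head_tail/tickets union in arch_spinlock_t. */
typedef union {
	uint32_t head_tail;
	struct {
		uint16_t head;	/* ticket now being served */
		uint16_t tail;	/* next ticket to hand out */
	} tickets;
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *lock)
{
	/* Take a ticket: atomic fetch-and-add on tail (xadd in the kernel). */
	uint16_t me = __atomic_fetch_add(&lock->tickets.tail, 1,
					 __ATOMIC_ACQUIRE);

	/* Spin until our number comes up; the kernel version would
	 * cpu_relax() here and, under PV, block/kick via the hooks. */
	while (__atomic_load_n(&lock->tickets.head, __ATOMIC_ACQUIRE) != me)
		;
}

static bool ticket_trylock(ticket_lock_t *lock)
{
	ticket_lock_t old, new;

	old.head_tail = __atomic_load_n(&lock->head_tail, __ATOMIC_RELAXED);
	if (old.tickets.head != old.tickets.tail)
		return false;		/* a ticket is outstanding: lock held */

	new.head_tail = old.head_tail;
	new.tickets.tail++;		/* claim the next ticket for ourselves */

	/* Single-shot cmpxchg on the whole word, as arch_spin_trylock()
	 * does on lock->head_tail. */
	return __atomic_compare_exchange_n(&lock->head_tail, &old.head_tail,
					   new.head_tail, false,
					   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

static void ticket_unlock(ticket_lock_t *lock)
{
	/* Serve the next ticket holder (the kernel additionally kicks
	 * any waiter blocked on that ticket). */
	__atomic_fetch_add(&lock->tickets.head, 1, __ATOMIC_RELEASE);
}

[The FIFO ticket order is what gives the lock its fairness, and doing
trylock as one cmpxchg over the whole head_tail word means it either
takes the lock while no ticket is outstanding or fails without
disturbing waiters.]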
 arch/x86/include/asm/spinlock.h |   35 +++++------------------------------
 1 files changed, 5 insertions(+), 30 deletions(-)

diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 8e379d3..cfa80b5 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -108,7 +108,7 @@ static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock, __t
 	____ticket_unlock_kick(lock, next);
 }
 
-static __always_inline void __ticket_spin_lock(struct arch_spinlock *lock)
+static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
 {
 	register struct __raw_tickets inc;
@@ -128,7 +128,7 @@ static __always_inline void __ticket_spin_lock(struct arch_spinlock *lock)
 out:	barrier();		/* make sure nothing creeps before the lock is taken */
 }
 
-static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	arch_spinlock_t old, new;
@@ -142,7 +142,7 @@ static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
-static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__ticket_t next = lock->tickets.head + 1;
 	__ticket_unlock_release(lock);
@@ -150,46 +150,21 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 	barrier();		/* prevent reordering into locked region */
 }
 
-static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
 	return !!(tmp.tail ^ tmp.head);
 }
 
-static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
 	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
 	return ((tmp.tail - tmp.head) & TICKET_MASK) > 1;
 }
-
-static inline int arch_spin_is_locked(arch_spinlock_t *lock)
-{
-	return __ticket_spin_is_locked(lock);
-}
-
-static inline int arch_spin_is_contended(arch_spinlock_t *lock)
-{
-	return __ticket_spin_is_contended(lock);
-}
 #define arch_spin_is_contended	arch_spin_is_contended
 
-static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	__ticket_spin_lock(lock);
-}
-
-static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-	return __ticket_spin_trylock(lock);
-}
-
-static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	__ticket_spin_unlock(lock);
-}
-
 static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 						  unsigned long flags)
 {
--
1.7.2.3