For use in qspinlock because unconditional atomic ops scale better than cmpxchg loops. Signed-off-by: Peter Zijlstra --- arch/x86/include/asm/atomic.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h @@ -218,6 +218,19 @@ static inline short int atomic_inc_short return *v; } +/** + * atomic_test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @v: Pointer to the atomic_t whose counter holds the bit + * + * This operation is atomic and cannot be reordered. + * It also implies a memory barrier. + */ +static inline int atomic_test_and_set_bit(int nr, atomic_t *v) +{ + GEN_BINARY_RMWcc(LOCK_PREFIX "bts", v->counter, "Ir", nr, "%0", "c"); +} + #ifdef CONFIG_X86_64 /** * atomic_or_long - OR of two long integers -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/