Message-ID: <tip-16fbad086976574b99ea7019c0504d0194e95dc3@git.kernel.org>
Date: Mon, 3 Jun 2019 06:36:55 -0700
From: tip-bot for Mark Rutland <tipbot@...or.com>
To: linux-tip-commits@...r.kernel.org
Cc: hpa@...or.com, peterz@...radead.org, mark.rutland@....com,
torvalds@...ux-foundation.org, vgupta@...opsys.com,
will.deacon@....com, linux-kernel@...r.kernel.org,
mingo@...nel.org, tglx@...utronix.de
Subject: [tip:locking/core] locking/atomic, arc: Use s64 for atomic64
Commit-ID: 16fbad086976574b99ea7019c0504d0194e95dc3
Gitweb: https://git.kernel.org/tip/16fbad086976574b99ea7019c0504d0194e95dc3
Author: Mark Rutland <mark.rutland@....com>
AuthorDate: Wed, 22 May 2019 14:22:37 +0100
Committer: Ingo Molnar <mingo@...nel.org>
CommitDate: Mon, 3 Jun 2019 12:32:56 +0200
locking/atomic, arc: Use s64 for atomic64
As a step towards making the atomic64 API use consistent types treewide,
let's have the arc atomic64 implementation use s64 as the underlying
type for atomic64_t, rather than u64, matching the generated headers.
Otherwise, there should be no functional change as a result of this
patch.
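
For illustration only (not part of this patch): a minimal userspace sketch
of the same idea, using int64_t and the GCC/Clang __atomic builtins as
stand-ins for the kernel's s64 and the arc ldd/std/llockd sequences. The
atomic64_demo_* names are hypothetical, chosen only to show how keeping one
signed 64-bit type across the structure and every prototype avoids the
long long / u64 mixing this series removes.

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's atomic64_t: a signed, 8-byte
 * aligned 64-bit counter, mirroring "s64 __aligned(8) counter". */
typedef struct {
	int64_t counter __attribute__((aligned(8)));
} atomic64_demo_t;

static inline int64_t atomic64_demo_read(const atomic64_demo_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

static inline void atomic64_demo_set(atomic64_demo_t *v, int64_t a)
{
	__atomic_store_n(&v->counter, a, __ATOMIC_RELAXED);
}

int main(void)
{
	atomic64_demo_t v = { .counter = 0 };

	/* Signed values round-trip without implicit conversions once
	 * every prototype agrees on a single signed 64-bit type. */
	atomic64_demo_set(&v, -42);
	printf("%lld\n", (long long)atomic64_demo_read(&v));
	return 0;
}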
Acked-by: Vineet Gupta <vgupta@...opsys.com>
Signed-off-by: Mark Rutland <mark.rutland@....com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
Cc: Linus Torvalds <torvalds@...ux-foundation.org>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Thomas Gleixner <tglx@...utronix.de>
Cc: Will Deacon <will.deacon@....com>
Cc: aou@...s.berkeley.edu
Cc: arnd@...db.de
Cc: bp@...en8.de
Cc: catalin.marinas@....com
Cc: davem@...emloft.net
Cc: fenghua.yu@...el.com
Cc: heiko.carstens@...ibm.com
Cc: herbert@...dor.apana.org.au
Cc: ink@...assic.park.msu.ru
Cc: jhogan@...nel.org
Cc: linux@...linux.org.uk
Cc: mattst88@...il.com
Cc: mpe@...erman.id.au
Cc: palmer@...ive.com
Cc: paul.burton@...s.com
Cc: paulus@...ba.org
Cc: ralf@...ux-mips.org
Cc: rth@...ddle.net
Cc: tony.luck@...el.com
Link: https://lkml.kernel.org/r/20190522132250.26499-6-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@...nel.org>
---
arch/arc/include/asm/atomic.h | 41 ++++++++++++++++++++---------------------
1 file changed, 20 insertions(+), 21 deletions(-)
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 158af079838d..2c75df55d0d2 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -324,14 +324,14 @@ ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
*/
typedef struct {
- aligned_u64 counter;
+ s64 __aligned(8) counter;
} atomic64_t;
#define ATOMIC64_INIT(a) { (a) }
-static inline long long atomic64_read(const atomic64_t *v)
+static inline s64 atomic64_read(const atomic64_t *v)
{
- unsigned long long val;
+ s64 val;
__asm__ __volatile__(
" ldd %0, [%1] \n"
@@ -341,7 +341,7 @@ static inline long long atomic64_read(const atomic64_t *v)
return val;
}
-static inline void atomic64_set(atomic64_t *v, long long a)
+static inline void atomic64_set(atomic64_t *v, s64 a)
{
/*
* This could have been a simple assignment in "C" but would need
@@ -362,9 +362,9 @@ static inline void atomic64_set(atomic64_t *v, long long a)
}
#define ATOMIC64_OP(op, op1, op2) \
-static inline void atomic64_##op(long long a, atomic64_t *v) \
+static inline void atomic64_##op(s64 a, atomic64_t *v) \
{ \
- unsigned long long val; \
+ s64 val; \
\
__asm__ __volatile__( \
"1: \n" \
@@ -375,13 +375,13 @@ static inline void atomic64_##op(long long a, atomic64_t *v) \
" bnz 1b \n" \
: "=&r"(val) \
: "r"(&v->counter), "ir"(a) \
- : "cc"); \
+ : "cc"); \
} \
#define ATOMIC64_OP_RETURN(op, op1, op2) \
-static inline long long atomic64_##op##_return(long long a, atomic64_t *v) \
+static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v) \
{ \
- unsigned long long val; \
+ s64 val; \
\
smp_mb(); \
\
@@ -402,9 +402,9 @@ static inline long long atomic64_##op##_return(long long a, atomic64_t *v) \
}
#define ATOMIC64_FETCH_OP(op, op1, op2) \
-static inline long long atomic64_fetch_##op(long long a, atomic64_t *v) \
+static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \
{ \
- unsigned long long val, orig; \
+ s64 val, orig; \
\
smp_mb(); \
\
@@ -444,10 +444,10 @@ ATOMIC64_OPS(xor, xor, xor)
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-static inline long long
-atomic64_cmpxchg(atomic64_t *ptr, long long expected, long long new)
+static inline s64
+atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
{
- long long prev;
+ s64 prev;
smp_mb();
@@ -467,9 +467,9 @@ atomic64_cmpxchg(atomic64_t *ptr, long long expected, long long new)
return prev;
}
-static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new)
{
- long long prev;
+ s64 prev;
smp_mb();
@@ -495,9 +495,9 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
* the atomic variable, v, was not decremented.
*/
-static inline long long atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 atomic64_dec_if_positive(atomic64_t *v)
{
- long long val;
+ s64 val;
smp_mb();
@@ -528,10 +528,9 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
* Atomically adds @a to @v, if it was not @u.
* Returns the old value of @v
*/
-static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
- long long u)
+static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
- long long old, temp;
+ s64 old, temp;
smp_mb();