Message-Id: <20190522132250.26499-10-mark.rutland@arm.com>
Date: Wed, 22 May 2019 14:22:41 +0100
From: Mark Rutland <mark.rutland@....com>
To: linux-kernel@...r.kernel.org, peterz@...radead.org,
will.deacon@....com
Cc: aou@...s.berkeley.edu, arnd@...db.de, bp@...en8.de,
catalin.marinas@....com, davem@...emloft.net, fenghua.yu@...el.com,
heiko.carstens@...ibm.com, herbert@...dor.apana.org.au,
ink@...assic.park.msu.ru, jhogan@...nel.org, linux@...linux.org.uk,
mark.rutland@....com, mattst88@...il.com, mingo@...nel.org,
mpe@...erman.id.au, palmer@...ive.com, paul.burton@...s.com,
paulus@...ba.org, ralf@...ux-mips.org, rth@...ddle.net,
stable@...r.kernel.org, tglx@...utronix.de, tony.luck@...el.com,
vgupta@...opsys.com
Subject: [PATCH 09/18] locking/atomic: mips: use s64 for atomic64

As a step towards making the atomic64 API use consistent types treewide,
let's have the mips atomic64 implementation use s64 as the underlying
type for atomic64_t, rather than long or __s64, matching the generated
headers.

As atomic64_read() depends on the generic definition of atomic64_t, this
still returns long on 64-bit. This will be converted in a subsequent
patch.

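To make that concrete, here is a minimal userspace sketch (illustrative
only; the typedef, the stand-in op body, and the read macro below are
simplified assumptions, not the kernel's real definitions) of the typing
split: the per-arch ops now take and return s64, while atomic64_read()
still yields the counter's declared type, i.e. long on 64-bit.

#include <stdint.h>
#include <stdio.h>

typedef int64_t s64;	/* kernel-style fixed-width alias */

/*
 * Simplified stand-in for the generic atomic64_t definition, whose
 * counter is still declared "long" on 64-bit until the follow-up patch.
 */
typedef struct { long counter; } atomic64_t;

/* Post-patch mips-style signature: takes and returns s64. */
static s64 atomic64_add_return(s64 i, atomic64_t *v)
{
	return v->counter += i;	/* stand-in for the real LL/SC asm */
}

/* The generic read still yields the counter's declared type (long). */
#define atomic64_read(v) ((v)->counter)

int main(void)
{
	atomic64_t v = { .counter = 1 };
	s64 ret = atomic64_add_return(2, &v);	/* s64: always 64-bit */
	long val = atomic64_read(&v);		/* long: 64-bit only on LP64 */

	printf("add_return=%lld read=%ld\n", (long long)ret, val);
	return 0;
}
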
Otherwise, there should be no functional change as a result of this
patch.

Signed-off-by: Mark Rutland <mark.rutland@....com>
Cc: James Hogan <jhogan@...nel.org>
Cc: Paul Burton <paul.burton@...s.com>
Cc: Peter Zijlstra <peterz@...radead.org>
Cc: Ralf Baechle <ralf@...ux-mips.org>
Cc: Will Deacon <will.deacon@....com>
---
arch/mips/include/asm/atomic.h | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 94096299fc56..9a82dd11c0e9 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -254,10 +254,10 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
#define atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))
#define ATOMIC64_OP(op, c_op, asm_op) \
-static __inline__ void atomic64_##op(long i, atomic64_t * v) \
+static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
{ \
if (kernel_uses_llsc) { \
- long temp; \
+ s64 temp; \
\
loongson_llsc_mb(); \
__asm__ __volatile__( \
@@ -280,12 +280,12 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
}
#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
-static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
+static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
{ \
- long result; \
+ s64 result; \
\
if (kernel_uses_llsc) { \
- long temp; \
+ s64 temp; \
\
loongson_llsc_mb(); \
__asm__ __volatile__( \
@@ -314,12 +314,12 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
}
#define ATOMIC64_FETCH_OP(op, c_op, asm_op) \
-static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
+static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
{ \
- long result; \
+ s64 result; \
\
if (kernel_uses_llsc) { \
- long temp; \
+ s64 temp; \
\
loongson_llsc_mb(); \
__asm__ __volatile__( \
@@ -386,14 +386,14 @@ ATOMIC64_OPS(xor, ^=, xor)
* Atomically test @v and subtract @i if @v is greater or equal than @i.
* The function returns the old value of @v minus @i.
*/
-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
+static __inline__ s64 atomic64_sub_if_positive(s64 i, atomic64_t * v)
{
- long result;
+ s64 result;
smp_mb__before_llsc();
if (kernel_uses_llsc) {
- long temp;
+ s64 temp;
__asm__ __volatile__(
" .set push \n"
--
2.11.0