Message-ID: <ZMtYZ2jcgh5hePIe@gmail.com>
Date: Thu, 3 Aug 2023 03:33:59 -0400
From: Guo Ren <guoren@...nel.org>
To: Leonardo Bras <leobras@...hat.com>
Cc: Will Deacon <will@...nel.org>,
Peter Zijlstra <peterz@...radead.org>,
Boqun Feng <boqun.feng@...il.com>,
Mark Rutland <mark.rutland@....com>,
Paul Walmsley <paul.walmsley@...ive.com>,
Palmer Dabbelt <palmer@...belt.com>,
Albert Ou <aou@...s.berkeley.edu>,
Andrzej Hajda <andrzej.hajda@...el.com>,
Arnd Bergmann <arnd@...db.de>, Ingo Molnar <mingo@...nel.org>,
Palmer Dabbelt <palmer@...osinc.com>,
linux-kernel@...r.kernel.org, linux-riscv@...ts.infradead.org
Subject: Re: [RFC PATCH v2 3/3] riscv/atomic.h : Deduplicate arch_atomic.*

On Thu, Aug 03, 2023 at 02:14:00AM -0300, Leonardo Bras wrote:
> Some functions use mostly the same asm for 32-bit and 64-bit versions.
>
> Make a macro that is generic enough to cover both, avoiding code duplication.
>
> (This did not cause any change in generated asm)
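
For anyone skimming the diff: the deduplication leans on the C
preprocessor pasting adjacent string literals, so passing the LR/SC
width suffix as a string argument selects "lr.w"/"sc.w.rl" versus
"lr.d"/"sc.d.rl" while the rest of the asm template stays shared. A
toy model of the trick, standalone and not kernel code:

	#include <stdio.h>

	/* "lr." sfx " ..." concatenates into a single string at
	 * preprocessing time, exactly like the asm templates below. */
	#define EMIT_LR(sfx) puts("lr." sfx " %[p], %[c]")

	int main(void)
	{
		EMIT_LR("w");	/* prints: lr.w %[p], %[c] */
		EMIT_LR("d");	/* prints: lr.d %[p], %[c] */
		return 0;
	}
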
>
> Signed-off-by: Leonardo Bras <leobras@...hat.com>
> ---
> arch/riscv/include/asm/atomic.h | 164 +++++++++++++++-----------------
> 1 file changed, 76 insertions(+), 88 deletions(-)
>
> diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
> index f5dfef6c2153..80cca7ac16fd 100644
> --- a/arch/riscv/include/asm/atomic.h
> +++ b/arch/riscv/include/asm/atomic.h
> @@ -196,22 +196,28 @@ ATOMIC_OPS(xor, xor, i)
> #undef ATOMIC_FETCH_OP
> #undef ATOMIC_OP_RETURN
>
> +#define _arch_atomic_fetch_add_unless(_prev, _rc, counter, _a, _u, sfx) \
> +({ \
> + __asm__ __volatile__ ( \
> + "0: lr." sfx " %[p], %[c]\n" \
> + " beq %[p], %[u], 1f\n" \
> + " add %[rc], %[p], %[a]\n" \
> + " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
> + " bnez %[rc], 0b\n" \
> + " fence rw, rw\n" \
> + "1:\n" \
> + : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
> + : [a]"r" (_a), [u]"r" (_u) \
> + : "memory"); \
> +})
> +
> /* This is required to provide a full barrier on success. */
> static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
> {
> int prev, rc;
>
> - __asm__ __volatile__ (
> - "0: lr.w %[p], %[c]\n"
> - " beq %[p], %[u], 1f\n"
> - " add %[rc], %[p], %[a]\n"
> - " sc.w.rl %[rc], %[rc], %[c]\n"
> - " bnez %[rc], 0b\n"
> - " fence rw, rw\n"
> - "1:\n"
> - : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
> - : [a]"r" (a), [u]"r" (u)
> - : "memory");
> + _arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "w");
> +
> return prev;
> }
> #define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
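
A side note for reviewers comparing against the old open-coded asm:
the loop implements "add a to *v unless *v == u, returning the old
value", and the "fence rw, rw" runs only on the success path (the beq
jumps past it, and a failed sc loops back before reaching it), which
is what provides the full barrier on success. A minimal C model of
the semantics, with an invented helper name, illustration only:

	/* Atomically: if (*v != u) *v += a; return the old value. */
	static int fetch_add_unless_model(int *v, int a, int u)
	{
		int prev = *v;		/* lr.w                   */
		if (prev != u)		/* beq %[p], %[u], 1f     */
			*v = prev + a;	/* sc.w.rl, bnez retries  */
		return prev;
	}
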
> @@ -222,77 +228,86 @@ static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a,
> s64 prev;
> long rc;
>
> - __asm__ __volatile__ (
> - "0: lr.d %[p], %[c]\n"
> - " beq %[p], %[u], 1f\n"
> - " add %[rc], %[p], %[a]\n"
> - " sc.d.rl %[rc], %[rc], %[c]\n"
> - " bnez %[rc], 0b\n"
> - " fence rw, rw\n"
> - "1:\n"
> - : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
> - : [a]"r" (a), [u]"r" (u)
> - : "memory");
> + _arch_atomic_fetch_add_unless(prev, rc, v->counter, a, u, "d");
> +
> return prev;
> }
> #define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
> #endif
>
> +#define _arch_atomic_inc_unless_negative(_prev, _rc, counter, sfx) \
> +({ \
> + __asm__ __volatile__ ( \
> + "0: lr." sfx " %[p], %[c]\n" \
> + " bltz %[p], 1f\n" \
> + " addi %[rc], %[p], 1\n" \
> + " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
> + " bnez %[rc], 0b\n" \
> + " fence rw, rw\n" \
> + "1:\n" \
> + : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
> + : \
> + : "memory"); \
> +})
> +
> static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
> {
> int prev, rc;
>
> - __asm__ __volatile__ (
> - "0: lr.w %[p], %[c]\n"
> - " bltz %[p], 1f\n"
> - " addi %[rc], %[p], 1\n"
> - " sc.w.rl %[rc], %[rc], %[c]\n"
> - " bnez %[rc], 0b\n"
> - " fence rw, rw\n"
> - "1:\n"
> - : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
> - :
> - : "memory");
> + _arch_atomic_inc_unless_negative(prev, rc, v->counter, "w");
> +
> return !(prev < 0);
> }
>
> #define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
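
Same pattern for the unless-negative flavor; a C model of what the
loop computes (name invented, illustration only):

	#include <stdbool.h>

	/* Atomically: if (*v >= 0) (*v)++; return true iff it did. */
	static bool inc_unless_negative_model(int *v)
	{
		int prev = *v;		/* lr.w            */
		if (prev < 0)		/* bltz %[p], 1f   */
			return false;
		*v = prev + 1;		/* addi + sc.w.rl  */
		return true;		/* i.e. !(prev < 0) */
	}
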
>
> +#define _arch_atomic_dec_unless_positive(_prev, _rc, counter, sfx) \
> +({ \
> + __asm__ __volatile__ ( \
> + "0: lr." sfx " %[p], %[c]\n" \
> + " bgtz %[p], 1f\n" \
> + " addi %[rc], %[p], -1\n" \
> + " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
> + " bnez %[rc], 0b\n" \
> + " fence rw, rw\n" \
> + "1:\n" \
> + : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
> + : \
> + : "memory"); \
> +})
> +
> static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
> {
> int prev, rc;
>
> - __asm__ __volatile__ (
> - "0: lr.w %[p], %[c]\n"
> - " bgtz %[p], 1f\n"
> - " addi %[rc], %[p], -1\n"
> - " sc.w.rl %[rc], %[rc], %[c]\n"
> - " bnez %[rc], 0b\n"
> - " fence rw, rw\n"
> - "1:\n"
> - : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
> - :
> - : "memory");
> + _arch_atomic_dec_unless_positive(prev, rc, v->counter, "w");
> +
> return !(prev > 0);
> }
>
> #define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
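
And the mirror image for dec_unless_positive (again an invented name,
illustration only):

	#include <stdbool.h>

	/* Atomically: if (*v <= 0) (*v)--; return true iff it did. */
	static bool dec_unless_positive_model(int *v)
	{
		int prev = *v;		/* lr.w            */
		if (prev > 0)		/* bgtz %[p], 1f   */
			return false;
		*v = prev - 1;		/* addi + sc.w.rl  */
		return true;		/* i.e. !(prev > 0) */
	}
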
>
> +#define _arch_atomic_dec_if_positive(_prev, _rc, counter, sfx) \
> +({ \
> + __asm__ __volatile__ ( \
> + "0: lr." sfx " %[p], %[c]\n" \
> + " addi %[rc], %[p], -1\n" \
> + " bltz %[rc], 1f\n" \
> + " sc." sfx ".rl %[rc], %[rc], %[c]\n" \
> + " bnez %[rc], 0b\n" \
> + " fence rw, rw\n" \
> + "1:\n" \
> + : [p]"=&r" (_prev), [rc]"=&r" (_rc), [c]"+A" (counter) \
> + : \
> + : "memory"); \
> +})
> +
> static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
> {
> int prev, rc;
>
> - __asm__ __volatile__ (
> - "0: lr.w %[p], %[c]\n"
> - " addi %[rc], %[p], -1\n"
> - " bltz %[rc], 1f\n"
> - " sc.w.rl %[rc], %[rc], %[c]\n"
> - " bnez %[rc], 0b\n"
> - " fence rw, rw\n"
> - "1:\n"
> - : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
> - :
> - : "memory");
> + _arch_atomic_dec_if_positive(prev, rc, v->counter, "w");
> +
> return prev - 1;
> }
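
One subtlety worth keeping in mind with this flavor: unlike the
unless-* helpers it returns the new value, and a negative return
means the store was skipped. A C model (invented name, illustration
only):

	/* Atomically: if (*v - 1 >= 0) *v -= 1; return old - 1,
	 * which is negative exactly when nothing was stored. */
	static int dec_if_positive_model(int *v)
	{
		int prev = *v;		/* lr.w                  */
		int rc = prev - 1;	/* addi %[rc], %[p], -1  */
		if (rc >= 0)		/* bltz %[rc], 1f skips  */
			*v = rc;	/* sc.w.rl               */
		return prev - 1;	/* rc on either path     */
	}
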
>
> @@ -304,17 +319,8 @@ static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
> s64 prev;
> long rc;
>
> - __asm__ __volatile__ (
> - "0: lr.d %[p], %[c]\n"
> - " bltz %[p], 1f\n"
> - " addi %[rc], %[p], 1\n"
> - " sc.d.rl %[rc], %[rc], %[c]\n"
> - " bnez %[rc], 0b\n"
> - " fence rw, rw\n"
> - "1:\n"
> - : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
> - :
> - : "memory");
> + _arch_atomic_inc_unless_negative(prev, rc, v->counter, "d");
> +
> return !(prev < 0);
> }
>
> @@ -325,17 +331,8 @@ static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
> s64 prev;
> long rc;
>
> - __asm__ __volatile__ (
> - "0: lr.d %[p], %[c]\n"
> - " bgtz %[p], 1f\n"
> - " addi %[rc], %[p], -1\n"
> - " sc.d.rl %[rc], %[rc], %[c]\n"
> - " bnez %[rc], 0b\n"
> - " fence rw, rw\n"
> - "1:\n"
> - : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
> - :
> - : "memory");
> + _arch_atomic_dec_unless_positive(prev, rc, v->counter, "d");
> +
> return !(prev > 0);
> }
>
> @@ -346,17 +343,8 @@ static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
> s64 prev;
> long rc;
>
> - __asm__ __volatile__ (
> - "0: lr.d %[p], %[c]\n"
> - " addi %[rc], %[p], -1\n"
> - " bltz %[rc], 1f\n"
> - " sc.d.rl %[rc], %[rc], %[c]\n"
> - " bnez %[rc], 0b\n"
> - " fence rw, rw\n"
> - "1:\n"
> - : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
> - :
> - : "memory");
> + _arch_atomic_dec_if_positive(prev, rc, v->counter, "d");
> +
> return prev - 1;
> }

I have no problem with this optimization.

Reviewed-by: Guo Ren <guoren@...nel.org>
>
> --
> 2.41.0
>