Message-ID: <Y1mCZT8ejRpvCfw6@spud>
Date: Wed, 26 Oct 2022 19:54:29 +0100
From: Conor Dooley <conor@...nel.org>
To: Wen Yao <haiwenyao@...ontech.com>
Cc: paul.walmsley@...ive.com, palmer@...belt.com,
aou@...s.berkeley.edu, dennis@...nel.org, tj@...nel.org,
cl@...ux.com, linux-riscv@...ts.infradead.org,
linux-kernel@...r.kernel.org, linux-mm@...ck.org
Subject: Re: [PATCH 1/2] riscv: percpu:Add riscv percpu operations
Hey Wen Yao,
Couple comments for you.
On Wed, Oct 26, 2022 at 06:40:14PM +0800, Wen Yao wrote:
> riscv: percpu:Add riscv percpu operations
Can you please consistently use ": " between parts of the commit
messages? For both this and patch 2/2.
> This patch use riscv AMO(Atomic Memory Operation) instructions to
nit: s/This patch use/Use/ (or better, reword to something like
"Optimise some ... operations using RISC-V AMO (Atomic Memory
Operation) instructions").
> optimise some this_cpu_and this_cpu_or this_cpu_add operations.
> It reuse cmpxchg_local() to impletment this_cpu_cmpxchg macros.
s/It reuse/Reuse/, and "impletment" is a typo.
> It reuse xchg_relaxed() to impletment this_cpu_xchg macros.
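FWIW, it might be worth spelling out in the commit message what the
win actually is. If I'm reading asm-generic/percpu.h right, the
generic this_cpu_add() and friends fall back to something like:

	unsigned long __flags;
	raw_local_irq_save(__flags);
	raw_cpu_add(pcp, val);
	raw_local_irq_restore(__flags);

whereas a single AMO is atomic on its own, so only preemption needs
to be disabled around it.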
>
> Signed-off-by: Wen Yao <haiwenyao@...ontech.com>
> ---
> arch/riscv/include/asm/percpu.h | 101 ++++++++++++++++++++++++++++++++
> 1 file changed, 101 insertions(+)
> create mode 100644 arch/riscv/include/asm/percpu.h
>
> diff --git a/arch/riscv/include/asm/percpu.h b/arch/riscv/include/asm/percpu.h
> new file mode 100644
> index 000000000000..ae796e328442
> --- /dev/null
> +++ b/arch/riscv/include/asm/percpu.h
> @@ -0,0 +1,101 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * Copyright (C) 2020-2022 Union Tech Software Technology Corporation Limited
> + */
> +#ifndef __ASM_PERCPU_H
> +#define __ASM_PERCPU_H
> +
> +#include <asm/cmpxchg.h>
> +
> +#define PERCPU_OP(op, asm_op, c_op) \
> + static inline unsigned long __percpu_##op(void *ptr, \
Can you please make sure that these \s are actually aligned & swap the
spaces you've used for tabs? The other files that I checked in this
directory all use tabs for \ alignment in macros.
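i.e., assuming my mail client doesn't mangle the whitespace, something
like:

#define PERCPU_OP(op, asm_op, c_op)					\
static inline unsigned long __percpu_##op(void *ptr,			\
					  unsigned long val, int size)	\
{									\
	...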
Thanks,
Conor.
> + unsigned long val, int size) \
> + { \
> + unsigned long ret; \
> + switch (size) { \
> + case 4: \
> + __asm__ __volatile__( \
> + "amo" #asm_op ".w" \
> + " %[ret], %[val], %[ptr]\n" \
> + : [ret] "=&r"(ret), [ptr] "+A"(*(u32 *)ptr) \
> + : [val] "r"(val)); \
> + break; \
> + case 8: \
> + __asm__ __volatile__( \
> + "amo" #asm_op ".d" \
> + " %[ret], %[val], %[ptr]\n" \
> + : [ret] "=&r"(ret), [ptr] "+A"(*(u64 *)ptr) \
> + : [val] "r"(val)); \
> + break; \
> + default: \
> + ret = 0; \
> + BUILD_BUG(); \
> + } \
> + \
> + return ret c_op val; \
> + }
> +
> +PERCPU_OP(add, add, +)
> +PERCPU_OP(and, and, &)
> +PERCPU_OP(or, or, |)
> +#undef PERCPU_OP
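Not a problem as such, but it took me a second to convince myself the
"ret c_op val" was right, so for anyone else reading along,
PERCPU_OP(add, add, +) expands (for the size == 4 case) to roughly:

	static inline unsigned long __percpu_add(void *ptr,
						 unsigned long val, int size)
	{
		unsigned long ret;
		/* amoadd.w atomically adds val & returns the old value */
		__asm__ __volatile__("amoadd.w %[ret], %[val], %[ptr]\n"
				     : [ret] "=&r"(ret), [ptr] "+A"(*(u32 *)ptr)
				     : [val] "r"(val));
		return ret + val;
	}

The AMO returns the *old* value in ret, so the c_op recomputes the new
one - which is what the _return variants below rely on.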
> +
> +/* this_cpu_xchg */
> +#define _protect_xchg_local(pcp, val) \
> + ({ \
> + typeof(*raw_cpu_ptr(&(pcp))) __ret; \
> + preempt_disable_notrace(); \
> + __ret = xchg_relaxed(raw_cpu_ptr(&(pcp)), val); \
> + preempt_enable_notrace(); \
> + __ret; \
> + })
> +
> +/* this_cpu_cmpxchg */
> +#define _protect_cmpxchg_local(pcp, o, n) \
> + ({ \
> + typeof(*raw_cpu_ptr(&(pcp))) __ret; \
> + preempt_disable_notrace(); \
> + __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \
> + preempt_enable_notrace(); \
> + __ret; \
> + })
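For the archives: the preempt_{disable,enable}_notrace() pairing here
is what makes something like (made-up variable):

	old = this_cpu_cmpxchg(my_pcp_var, 0, 1);

safe - raw_cpu_ptr() and cmpxchg_local() are then guaranteed to
operate on the same CPU's copy, with no migration in between.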
> +
> +#define _pcp_protect(operation, pcp, val) \
> + ({ \
> + typeof(pcp) __retval; \
> + preempt_disable_notrace(); \
> + __retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)), (val), \
> + sizeof(pcp)); \
> + preempt_enable_notrace(); \
> + __retval; \
> + })
> +
> +#define _percpu_add(pcp, val) _pcp_protect(__percpu_add, pcp, val)
> +
> +#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)
> +
> +#define _percpu_and(pcp, val) _pcp_protect(__percpu_and, pcp, val)
> +
> +#define _percpu_or(pcp, val) _pcp_protect(__percpu_or, pcp, val)
> +
> +#define this_cpu_add_4(pcp, val) _percpu_add(pcp, val)
> +#define this_cpu_add_8(pcp, val) _percpu_add(pcp, val)
> +
> +#define this_cpu_add_return_4(pcp, val) _percpu_add_return(pcp, val)
> +#define this_cpu_add_return_8(pcp, val) _percpu_add_return(pcp, val)
> +
> +#define this_cpu_and_4(pcp, val) _percpu_and(pcp, val)
> +#define this_cpu_and_8(pcp, val) _percpu_and(pcp, val)
> +
> +#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val)
> +#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val)
> +
> +#define this_cpu_xchg_4(pcp, val) _protect_xchg_local(pcp, val)
> +#define this_cpu_xchg_8(pcp, val) _protect_xchg_local(pcp, val)
> +
> +#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
> +#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
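I assume the 1- and 2-byte variants are intentionally left to the
generic irq-disabling fallback, since there's no byte/halfword AMOs to
build them from? If so, that'd also be worth a line in the commit
message. With these in place, something like (made-up variable):

	DEFINE_PER_CPU(u64, my_counter);
	this_cpu_add(my_counter, 1);

should now compile down (on rv64) to a single amoadd.d plus the
preempt disable/enable, instead of a full irq save/restore.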
> +
> +#include <asm-generic/percpu.h>
> +
> +#endif /* __ASM_PERCPU_H */
> --
> 2.25.1
>