Message-ID: <aMqAPwNmD6HxqUFh@e129823.arm.com>
Date: Wed, 17 Sep 2025 10:32:47 +0100
From: Yeoreum Yun <yeoreum.yun@....com>
To: Will Deacon <will@...nel.org>
Cc: Catalin Marinas <catalin.marinas@....com>, broonie@...nel.org,
maz@...nel.org, oliver.upton@...ux.dev, joey.gouly@....com,
james.morse@....com, ardb@...nel.org, scott@...amperecomputing.com,
suzuki.poulose@....com, yuzenghui@...wei.com, mark.rutland@....com,
linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.linux.dev,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH RESEND v7 4/6] arm64: futex: refactor futex atomic
operation
Hi all,
> Hi,
>
> [...]
> > > +#define LSUI_CMPXCHG_HELPER(suffix, start_bit) \
> > > +static __always_inline int \
> > > +__lsui_cmpxchg_helper_##suffix(u64 __user *uaddr, u32 oldval, u32 newval) \
> > > +{ \
> > > + int ret = 0; \
> > > + u64 oval, nval, tmp; \
> > > + \
> > > + asm volatile("//__lsui_cmpxchg_helper_" #suffix "\n" \
> > > + __LSUI_PREAMBLE \
> > > +" prfm pstl1strm, %2\n" \
> > > +"1: ldtr %x1, %2\n" \
> > > +" mov %x3, %x1\n" \
> > > +" bfi %x1, %x5, #" #start_bit ", #32\n" \
> > > +" bfi %x3, %x6, #" #start_bit ", #32\n" \
> > > +" mov %x4, %x1\n" \
> > > +"2: caslt %x1, %x3, %2\n" \
> > > +" sub %x1, %x1, %x4\n" \
> > > +" cbz %x1, 3f\n" \
> > > +" mov %w0, %w7\n" \
> > > +"3:\n" \
> > > +" dmb ish\n" \
> > > +"4:\n" \
> > > + _ASM_EXTABLE_UACCESS_ERR(1b, 4b, %w0) \
> > > + _ASM_EXTABLE_UACCESS_ERR(2b, 4b, %w0) \
> > > + : "+r" (ret), "=&r" (oval), "+Q" (*uaddr), "=&r" (nval), "=&r" (tmp) \
> > > + : "r" (oldval), "r" (newval), "Ir" (-EAGAIN) \
> > > + : "memory"); \
> >
> > The vast majority of this can be written in C.
>
> Here is the C version, based on patch 6:
>
> diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
> index 1d6d9f856ac5..68af15ba545a 100644
> --- a/arch/arm64/include/asm/futex.h
> +++ b/arch/arm64/include/asm/futex.h
> @@ -127,81 +127,77 @@ LSUI_FUTEX_ATOMIC_OP(andnot, ldtclr, al)
> LSUI_FUTEX_ATOMIC_OP(set, swpt, al)
>
> static __always_inline int
> -__lsui_futex_atomic_and(int oparg, u32 __user *uaddr, int *oval)
> +__lsui_cmpxchg_helper(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
> {
> - return __lsui_futex_atomic_andnot(~oparg, uaddr, oval);
> -}
> + int ret = -EAGAIN;
> + u64 __user *uaddr_al;
> + u64 oval64, nval64, tmp;
> + static const u64 lo32_mask = GENMASK_U64(31, 0);
> +
> + uaddr_al = (u64 __user *) ALIGN_DOWN((unsigned long)uaddr, sizeof(u64));
> + unsafe_get_user(oval64, uaddr_al, err_fault);
> +
> + if ((u32 __user *)uaddr_al != uaddr) {
> + nval64 = ((oval64 & lo32_mask) | ((u64)newval << 32));
> + oval64 = ((oval64 & lo32_mask) | ((u64)oldval << 32));
> + } else {
> + nval64 = ((oval64 & ~lo32_mask) | newval);
> + oval64 = ((oval64 & ~lo32_mask) | oldval);
> + }
>
> -static __always_inline int
> -__lsui_futex_atomic_eor(int oparg, u32 __user *uaddr, int *oval)
> -{
> - unsigned int loops = FUTEX_MAX_LOOPS;
> - int ret, oldval, tmp;
> + tmp = oval64;
>
> - /*
> - * there are no ldteor/stteor instructions...
> - */
> - asm volatile("// __lsui_futex_atomic_eor\n"
> + asm volatile("//__lsui_cmpxchg_helper\n"
> __LSUI_PREAMBLE
> -" prfm pstl1strm, %2\n"
> -"1: ldtxr %w1, %2\n"
> -" eor %w3, %w1, %w5\n"
> -"2: stltxr %w0, %w3, %2\n"
> -" cbz %w0, 3f\n"
> -" sub %w4, %w4, %w0\n"
> -" cbnz %w4, 1b\n"
> -" mov %w0, %w6\n"
> -"3:\n"
> +"1: caslt %x1, %x3, %2\n"
> +" sub %x1, %x1, %x4\n"
> +" cbnz %x1, 2f\n"
> +" mov %w0, %w5\n"
> +"2:\n"
> " dmb ish\n"
> +"3:\n"
> _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %w0)
> - _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %w0)
> - : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp),
> - "+r" (loops)
> - : "r" (oparg), "Ir" (-EAGAIN)
> + : "+r" (ret), "=&r" (oval64), "+Q" (*uaddr_al)
> + : "r" (nval64), "r" (tmp), "Ir" (0)
> : "memory");
>
> if (!ret)
> *oval = oldval;
>
> +err_fault:
> return ret;
> }
>
> static __always_inline int
> -__lsui_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
> +__lsui_futex_atomic_and(int oparg, u32 __user *uaddr, int *oval)
> {
> - int ret = 0;
> - unsigned int loops = FUTEX_MAX_LOOPS;
> - u32 val, tmp;
> + return __lsui_futex_atomic_andnot(~oparg, uaddr, oval);
> +}
> +
> +static __always_inline int
> +__lsui_futex_atomic_eor(int oparg, u32 __user *uaddr, int *oval)
> +{
> + int ret = -EAGAIN;
> + u32 oldval, newval;
>
> /*
> - * cas{al}t doesn't support word size...
> + * there are no ldteor/stteor instructions...
> */
> - asm volatile("//__lsui_futex_cmpxchg\n"
> - __LSUI_PREAMBLE
> -" prfm pstl1strm, %2\n"
> -"1: ldtxr %w1, %2\n"
> -" eor %w3, %w1, %w5\n"
> -" cbnz %w3, 4f\n"
> -"2: stltxr %w3, %w6, %2\n"
> -" cbz %w3, 3f\n"
> -" sub %w4, %w4, %w3\n"
> -" cbnz %w4, 1b\n"
> -" mov %w0, %w7\n"
> -"3:\n"
> -" dmb ish\n"
> -"4:\n"
> - _ASM_EXTABLE_UACCESS_ERR(1b, 4b, %w0)
> - _ASM_EXTABLE_UACCESS_ERR(2b, 4b, %w0)
> - : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops)
> - : "r" (oldval), "r" (newval), "Ir" (-EAGAIN)
> - : "memory");
> + unsafe_get_user(oldval, uaddr, err_fault);
> + newval = oldval ^ oparg;
>
> - if (!ret)
> - *oval = oldval;
> + ret = __lsui_cmpxchg_helper(uaddr, oldval, newval, oval);
>
> +err_fault:
> return ret;
> }
>
> +static __always_inline int
> +__lsui_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
> +{
> + return __lsui_cmpxchg_helper(uaddr, oldval, newval, oval);
> +}
> +
> #define __lsui_llsc_body(op, ...) \
> ({ \
> alternative_has_cap_likely(ARM64_HAS_LSUI) ? \
>
> I'm not sure whether this approach works for you,
> but if you share your thoughts, that would be great.
> (Note: when I tested the futex_atomic_eor op with 256 threads, there was
> not much difference from the former assembly version.)
>
It seems the discussion is converging on implementing cmpxchg in C.
I'll respin the cmpxchg as a C version with the missing endianness support.
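
For reference, here is a rough user-space sketch of the direction I have in
mind for the endianness handling (illustrative only, not the actual patch;
the function name and retry policy are placeholders). The idea is the same
as the helper above: perform the 32-bit cmpxchg as a 64-bit CAS on the
naturally aligned container, but derive the word's bit offset from both the
address and the byte order:

/*
 * Sketch only: emulate a 32-bit compare-exchange with a 64-bit CAS on
 * the naturally aligned 64-bit container, handling both byte orders.
 * User-space stand-in for the uaccess/caslt details of the real patch.
 */
#include <stdint.h>
#include <stdio.h>

static int cmpxchg32_via_cas64(uint32_t *uaddr, uint32_t oldval,
                               uint32_t newval, uint32_t *oval)
{
        uint64_t *uaddr_al = (uint64_t *)((uintptr_t)uaddr & ~(uintptr_t)7);
        /* bit offset of *uaddr inside the u64 container */
        unsigned int shift = ((uintptr_t)uaddr & 4) ? 32 : 0;
        uint64_t mask, cur, exp, des;

#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        shift ^= 32;    /* on BE the lower address holds the upper half */
#endif
        mask = (uint64_t)0xffffffffu << shift;

        cur = __atomic_load_n(uaddr_al, __ATOMIC_RELAXED);
        exp = (cur & ~mask) | ((uint64_t)oldval << shift);
        des = (cur & ~mask) | ((uint64_t)newval << shift);

        /* fails if either half changed under us; caller retries (-EAGAIN) */
        if (!__atomic_compare_exchange_n(uaddr_al, &exp, des, 0,
                                         __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
                return -1;
        *oval = oldval;
        return 0;
}

int main(void)
{
        _Alignas(8) uint32_t pair[2] = { 1, 2 };
        uint32_t old;

        if (!cmpxchg32_via_cas64(&pair[1], 2, 42, &old))
                printf("pair = { %u, %u }, old = %u\n",
                       pair[0], pair[1], old);
        return 0;
}

On big endian the u32 at the lower address lands in the upper half of the
u64, hence the shift flip; everything else stays the same as the
little-endian case.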
Thanks.
--
Sincerely,
Yeoreum Yun