Message-ID: <aMks2haYCZia8LR/@e129823.arm.com>
Date: Tue, 16 Sep 2025 10:24:42 +0100
From: Yeoreum Yun <yeoreum.yun@....com>
To: Catalin Marinas <catalin.marinas@....com>
Cc: Will Deacon <will@...nel.org>, broonie@...nel.org, maz@...nel.org,
oliver.upton@...ux.dev, joey.gouly@....com, james.morse@....com,
ardb@...nel.org, scott@...amperecomputing.com,
suzuki.poulose@....com, yuzenghui@...wei.com, mark.rutland@....com,
linux-arm-kernel@...ts.infradead.org, kvmarm@...ts.linux.dev,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH RESEND v7 4/6] arm64: futex: refactor futex atomic
operation

Sorry, ignore this. I've sent the wrong one :(
I'll send it again.
> Hi,
>
> > On Mon, Sep 15, 2025 at 09:35:55PM +0100, Will Deacon wrote:
> > > On Mon, Sep 15, 2025 at 08:40:33PM +0100, Catalin Marinas wrote:
> > > > On Mon, Sep 15, 2025 at 11:32:39AM +0100, Yeoreum Yun wrote:
> > > > > So I think it would be better to keep the current LLSC implementation
> > > > > in LSUI.
> > > >
> > > > I think the code would look simpler with LL/SC but you can give it a try
> > > > and post the code sample here (not in a new series).
> > >
> > > If you stick the cas*t instruction in its own helper, say cmpxchg_user(),
> > > then you can do all the shifting/masking in C and I don't reckon it's
> > > that bad. It means we (a) get rid of exclusives, which is the whole
> > > point of this and (b) don't have to mess around with PAN.
> >
> > We get rid of PAN toggling already since FEAT_LSUI introduces
> > LDTXR/STTXR. But, I'm all for CAS if it doesn't look too bad. Easier
> > I think if we do a get_user() of a u64 and combine it with the futex u32
> > while taking care of CPU endianness. All in a loop. Hopefully the
> > compiler is smart enough to reduce masking/or'ing to fewer instructions.
> >
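> (As an aside, the plain-C fold you describe might look roughly like the
> sketch below -- futex_u64_shift()/futex_u64_insert() are made-up names and
> this is only a compile-tested userspace sketch, not the actual kernel
> helpers:)
>
> #include <stdint.h>
>
> /* Which 32-bit half of the aligned u64 holds the futex word? */
> static inline unsigned int futex_u64_shift(uintptr_t uaddr)
> {
> 	unsigned int low_word = !(uaddr & 4);	/* futex at the lower address? */
>
> #ifdef __AARCH64EB__				/* big-endian: the halves swap */
> 	low_word = !low_word;
> #endif
> 	return low_word ? 0 : 32;
> }
>
> /* Replace the futex half of @old64 with @val, leaving the other half intact. */
> static inline uint64_t futex_u64_insert(uint64_t old64, uint32_t val,
> 					unsigned int shift)
> {
> 	return (old64 & ~((uint64_t)0xffffffff << shift)) |
> 	       ((uint64_t)val << shift);
> }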
>
> Hmm, I think the shifting/masking can be replaced by a single bfi
> instruction, like:
>
> diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
> index 1d6d9f856ac5..30da0006c0c8 100644
> --- a/arch/arm64/include/asm/futex.h
> +++ b/arch/arm64/include/asm/futex.h
> @@ -126,6 +126,59 @@ LSUI_FUTEX_ATOMIC_OP(or, ldtset, al)
> LSUI_FUTEX_ATOMIC_OP(andnot, ldtclr, al)
> LSUI_FUTEX_ATOMIC_OP(set, swpt, al)
>
> +
> +#define LSUI_CMPXCHG_HELPER(suffix, start_bit) \
> +static __always_inline int \
> +__lsui_cmpxchg_helper_##suffix(u64 __user *uaddr, u32 oldval, u32 newval) \
> +{ \
> + int ret = 0; \
> + u64 oval, nval, tmp; \
> + \
> + asm volatile("//__lsui_cmpxchg_helper_" #suffix "\n" \
> + __LSUI_PREAMBLE \
> +" prfm pstl1strm, %2\n" \
> +"1: ldtr %x1, %2\n" \
> +" bfi %x1, %x5, #" #start_bit ", #32\n" \
> +" bfi %x1, %x6, #" #start_bit ", #32\n" \
> +" mov %x4, %x5\n" \
> +"2: caslt %x5, %x6, %2\n" \
> +" sub %x4, %x4, %x5\n" \
> +" cbz %x4, 3f\n" \
> +" mov %w0, %w7\n" \
> +"3:\n" \
> +" dmb ish\n" \
> +"4:\n" \
> + _ASM_EXTABLE_UACCESS_ERR(1b, 4b, %w0) \
> + _ASM_EXTABLE_UACCESS_ERR(2b, 4b, %w0) \
> + : "+r" (ret), "=&r" (oval), "+Q" (*uaddr), "=&r" (nval), "=&r" (tmp) \
> + : "r" (oldval), "r" (newval), "Ir" (-EAGAIN) \
> + : "memory"); \
> + \
> + return ret; \
> +}
> +
> +LSUI_CMPXCHG_HELPER(lo, 0)
> +LSUI_CMPXCHG_HELPER(hi, 32)
> +
> +static __always_inline int
> +__lsui_cmpxchg_helper(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
> +{
> + int ret;
> + unsigned long uaddr_al;
> +
> + uaddr_al = ALIGN_DOWN((unsigned long)uaddr, sizeof(u64));
> +
> + if (uaddr_al != (unsigned long)uaddr)
> + ret = __lsui_cmpxchg_helper_hi((u64 __user *)uaddr_al, oldval, newval);
> + else
> + ret = __lsui_cmpxchg_helper_lo((u64 __user *)uaddr_al, oldval, newval);
> +
> + if (!ret)
> + *oval = oldval;
> +
> + return ret;
> +}
> +
> static __always_inline int
> __lsui_futex_atomic_and(int oparg, u32 __user *uaddr, int *oval)
> {
> @@ -135,71 +188,25 @@ __lsui_futex_atomic_and(int oparg, u32 __user *uaddr, int *oval)
> static __always_inline int
> __lsui_futex_atomic_eor(int oparg, u32 __user *uaddr, int *oval)
> {
> - unsigned int loops = FUTEX_MAX_LOOPS;
> - int ret, oldval, tmp;
> + int ret = -EAGAIN;
> + u32 oldval, newval;
>
> /*
> * there are no ldteor/stteor instructions...
> */
> - asm volatile("// __lsui_futex_atomic_eor\n"
> - __LSUI_PREAMBLE
> -" prfm pstl1strm, %2\n"
> -"1: ldtxr %w1, %2\n"
> -" eor %w3, %w1, %w5\n"
> -"2: stltxr %w0, %w3, %2\n"
> -" cbz %w0, 3f\n"
> -" sub %w4, %w4, %w0\n"
> -" cbnz %w4, 1b\n"
> -" mov %w0, %w6\n"
> -"3:\n"
> -" dmb ish\n"
> - _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %w0)
> - _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %w0)
> - : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp),
> - "+r" (loops)
> - : "r" (oparg), "Ir" (-EAGAIN)
> - : "memory");
> + unsafe_get_user(oldval, uaddr, err_fault);
> + newval = oldval ^ oparg;
>
> - if (!ret)
> - *oval = oldval;
> + ret = __lsui_cmpxchg_helper(uaddr, oldval, newval, oval);
>
> +err_fault:
> return ret;
> }
>
> static __always_inline int
> __lsui_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
> {
> - int ret = 0;
> - unsigned int loops = FUTEX_MAX_LOOPS;
> - u32 val, tmp;
> -
> - /*
> - * cas{al}t doesn't support word size...
> - */
> - asm volatile("//__lsui_futex_cmpxchg\n"
> - __LSUI_PREAMBLE
> -" prfm pstl1strm, %2\n"
> -"1: ldtxr %w1, %2\n"
> -" eor %w3, %w1, %w5\n"
> -" cbnz %w3, 4f\n"
> -"2: stltxr %w3, %w6, %2\n"
> -" cbz %w3, 3f\n"
> -" sub %w4, %w4, %w3\n"
> -" cbnz %w4, 1b\n"
> -" mov %w0, %w7\n"
> -"3:\n"
> -" dmb ish\n"
> -"4:\n"
> - _ASM_EXTABLE_UACCESS_ERR(1b, 4b, %w0)
> - _ASM_EXTABLE_UACCESS_ERR(2b, 4b, %w0)
> - : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops)
> - : "r" (oldval), "r" (newval), "Ir" (-EAGAIN)
> - : "memory");
> -
> - if (!ret)
> - *oval = oldval;
> -
> - return ret;
> + return __lsui_cmpxchg_helper(uaddr, oldval, newval, oval);
> }
>
> #define __lsui_llsc_body(op, ...) \
>
>
> This is based on patch #6.
>
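> For reference, a quick userspace model of the same lo/hi split and masking,
> just to illustrate the idea (a sketch only: a plain load and
> __atomic_compare_exchange_n() stand in for get_user()/caslt, and it assumes
> little-endian):
>
> #include <errno.h>
> #include <stdint.h>
> #include <stdio.h>
>
> static int model_futex_cmpxchg(uint32_t *uaddr, uint32_t oldval,
> 			       uint32_t newval, uint32_t *oval)
> {
> 	uint64_t *aligned = (uint64_t *)((uintptr_t)uaddr & ~7UL);
> 	unsigned int shift = ((uintptr_t)uaddr & 4) ? 32 : 0;
> 	uint64_t mask = (uint64_t)0xffffffff << shift;
> 	uint64_t old64, new64;
>
> 	old64 = *aligned;			/* get_user() of the u64 */
> 	old64 = (old64 & ~mask) | ((uint64_t)oldval << shift);
> 	new64 = (old64 & ~mask) | ((uint64_t)newval << shift);
>
> 	/* single 64-bit CAS on the containing word, like caslt */
> 	if (!__atomic_compare_exchange_n(aligned, &old64, new64, 0,
> 					 __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE))
> 		return -EAGAIN;		/* containing word changed under us */
>
> 	*oval = oldval;
> 	return 0;
> }
>
> int main(void)
> {
> 	uint64_t word = 0x00000002deadbeefULL;
> 	uint32_t *futex = (uint32_t *)&word + 1;	/* the "hi" half */
> 	uint32_t oval = 0;
> 	int ret = model_futex_cmpxchg(futex, 2, 7, &oval);
>
> 	printf("ret=%d oval=%u word=%#llx\n",
> 	       ret, oval, (unsigned long long)word);
> 	return 0;
> }
>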
> Am I missing something?
>
> --
> Sincerely,
> Yeoreum Yun
>
--
Sincerely,
Yeoreum Yun