lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <1b2e776e-0ae3-4f48-a2b9-99b486d49368@gaisler.com>
Date: Sat, 16 Nov 2024 09:50:16 +0100
From: Andreas Larsson <andreas@...sler.com>
To: koachan@...tonmail.com, "David S. Miller" <davem@...emloft.net>,
 Andy Lutomirski <luto@...nel.org>, Thomas Gleixner <tglx@...utronix.de>,
 Vincenzo Frascino <vincenzo.frascino@....com>,
 Nathan Chancellor <nathan@...nel.org>,
 Nick Desaulniers <ndesaulniers@...gle.com>, Bill Wendling
 <morbo@...gle.com>, Justin Stitt <justinstitt@...gle.com>
Cc: sparclinux@...r.kernel.org, linux-kernel@...r.kernel.org,
 llvm@...ts.linux.dev
Subject: Re: [PATCH v2] sparc/vdso: Add helper function for 64-bit right shift
 on 32-bit target

On 2024-08-08 04:05, Koakuma via B4 Relay wrote:
> From: Koakuma <koachan@...tonmail.com>
> 
> Add helper function for 64-bit right shift on 32-bit target so that
> clang does not emit a runtime library call.
> 
> Signed-off-by: Koakuma <koachan@...tonmail.com>
> ---
> Hi~
> 
> This adds a small function to do 64-bit right shifts for use in vDSO
> code, needed so that clang does not emit a call to runtime library.
> ---
> Changes in v2:
> - Move __shr64 to sparc code since there are no other users of it.
> - Now that __shr64 is not in portable code, redo it in inline asm for simpler implementation & better performance.
> - Link to v1: https://lore.kernel.org/r/20240804-sparc-shr64-v1-1-25050968339a@protonmail.com
> ---
>  arch/sparc/vdso/vclock_gettime.c | 28 ++++++++++++++++++++++++----
>  1 file changed, 24 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/sparc/vdso/vclock_gettime.c b/arch/sparc/vdso/vclock_gettime.c
> index e794edde6755..79607804ea1b 100644
> --- a/arch/sparc/vdso/vclock_gettime.c
> +++ b/arch/sparc/vdso/vclock_gettime.c
> @@ -86,6 +86,11 @@ notrace static long vdso_fallback_gettimeofday(struct __kernel_old_timeval *tv,
>  }
>  
>  #ifdef	CONFIG_SPARC64
> +notrace static __always_inline u64 __shr64(u64 val, int amt)
> +{
> +	return val >> amt;
> +}
> +
>  notrace static __always_inline u64 vread_tick(void)
>  {
>  	u64	ret;
> @@ -102,6 +107,21 @@ notrace static __always_inline u64 vread_tick_stick(void)
>  	return ret;
>  }
>  #else
> +notrace static __always_inline u64 __shr64(u64 val, int amt)
> +{
> +	u64 ret;
> +
> +	__asm__ __volatile__("sllx %H1, 32, %%g1\n\t"
> +			     "srl %L1, 0, %L1\n\t"
> +			     "or %%g1, %L1, %%g1\n\t"
> +			     "srlx %%g1, %2, %L0\n\t"
> +			     "srlx %L0, 32, %H0"
> +			     : "=r" (ret)
> +			     : "r" (val), "r" (amt)
> +			     : "g1");
> +	return ret;
> +}

Could residual data left in bits 63:32 of %L0 potentially pose a problem?


> +
>  notrace static __always_inline u64 vread_tick(void)
>  {
>  	register unsigned long long ret asm("o4");
> @@ -154,7 +174,7 @@ notrace static __always_inline int do_realtime(struct vvar_data *vvar,
>  		ts->tv_sec = vvar->wall_time_sec;
>  		ns = vvar->wall_time_snsec;
>  		ns += vgetsns(vvar);
> -		ns >>= vvar->clock.shift;
> +		ns = __shr64(ns, vvar->clock.shift);
>  	} while (unlikely(vvar_read_retry(vvar, seq)));
>  
>  	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
> @@ -174,7 +194,7 @@ notrace static __always_inline int do_realtime_stick(struct vvar_data *vvar,
>  		ts->tv_sec = vvar->wall_time_sec;
>  		ns = vvar->wall_time_snsec;
>  		ns += vgetsns_stick(vvar);
> -		ns >>= vvar->clock.shift;
> +		ns = __shr64(ns, vvar->clock.shift);
>  	} while (unlikely(vvar_read_retry(vvar, seq)));
>  
>  	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
> @@ -194,7 +214,7 @@ notrace static __always_inline int do_monotonic(struct vvar_data *vvar,
>  		ts->tv_sec = vvar->monotonic_time_sec;
>  		ns = vvar->monotonic_time_snsec;
>  		ns += vgetsns(vvar);
> -		ns >>= vvar->clock.shift;
> +		ns = __shr64(ns, vvar->clock.shift);
>  	} while (unlikely(vvar_read_retry(vvar, seq)));
>  
>  	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
> @@ -214,7 +234,7 @@ notrace static __always_inline int do_monotonic_stick(struct vvar_data *vvar,
>  		ts->tv_sec = vvar->monotonic_time_sec;
>  		ns = vvar->monotonic_time_snsec;
>  		ns += vgetsns_stick(vvar);
> -		ns >>= vvar->clock.shift;
> +		ns = __shr64(ns, vvar->clock.shift);
>  	} while (unlikely(vvar_read_retry(vvar, seq)));
>  
>  	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
> 
> ---
> base-commit: defaf1a2113a22b00dfa1abc0fd2014820eaf065
> change-id: 20240717-sparc-shr64-2f00a7884770
> 
> Best regards,

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ