Message-ID: <2r385158-3144-32rp-38ro-r1opo37qsq1r@syhkavp.arg>
Date: Wed, 29 Oct 2025 15:01:20 -0400 (EDT)
From: Nicolas Pitre <nico@...xnic.net>
To: David Laight <david.laight.linux@...il.com>
cc: Andrew Morton <akpm@...ux-foundation.org>, linux-kernel@...r.kernel.org, 
    u.kleine-koenig@...libre.com, Oleg Nesterov <oleg@...hat.com>, 
    Peter Zijlstra <peterz@...radead.org>, 
    Biju Das <biju.das.jz@...renesas.com>, Borislav Petkov <bp@...en8.de>, 
    Dave Hansen <dave.hansen@...ux.intel.com>, 
    "H. Peter Anvin" <hpa@...or.com>, Ingo Molnar <mingo@...hat.com>, 
    Thomas Gleixner <tglx@...utronix.de>, Li RongQing <lirongqing@...du.com>, 
    Yu Kuai <yukuai3@...wei.com>, Khazhismel Kumykov <khazhy@...omium.org>, 
    Jens Axboe <axboe@...nel.dk>, x86@...nel.org
Subject: Re: [PATCH v4 next 7/9] lib: mul_u64_u64_div_u64() optimise multiply
 on 32bit x86

On Wed, 29 Oct 2025, David Laight wrote:

> gcc generates horrid code for both ((u64)u32_a * u32_b) and (u64_a + u32_b).
> As well as executing extra instructions, it can generate a lot of spills
> to the stack (including spills of constant zeros and even multiplies by
> constant zero).
> 
> mul_u32_u32() already exists to optimise the multiply.
> Add a similar add_u64_u32() for the addition.
> Disable both for clang - it generates better code without them.
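
For reference, the generic C fallbacks (in include/linux/math64.h, the
second one added by this patch) are just the obvious expressions, so the
asm helpers change code generation only, not semantics:

	static inline u64 mul_u32_u32(u32 a, u32 b)
	{
		return (u64)a * b;
	}

	static inline u64 add_u64_u32(u64 a, u32 b)
	{
		return a + b;
	}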
> 
> Move the 64x64 => 128 multiply into a static inline helper function
> for code clarity.
> No need for the a/b_hi/lo variables: the implicit casts on the function
> calls do the work for us.
> Should have minimal effect on the generated code.
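
(Concretely: mul_u32_u32(a, b >> 32) already truncates a to its low 32
bits and b >> 32 to b's high half through the u32 parameters, which is
exactly what the removed a_lo/b_hi variables and casts did by hand.)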
> 
> Use mul_u32_u32() and add_u64_u32() in the 64x64 => 128 multiply
> in mul_u64_add_u64_div_u64().
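
Spelled out, the chunked multiply computes, with a = a_hi * 2^32 + a_lo
and likewise for b:

	a * b + c = a_hi * b_hi * 2^64
	          + (a_lo * b_hi + a_hi * b_lo) * 2^32
	          + a_lo * b_lo + c

and the "(x-1)(x-1) + 2(x-1) == x*x - 1" comment in the code below is
what guarantees that each u32 x u32 partial product plus two u32
carry/addend terms still fits in a u64 without overflow.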
> 
> Signed-off-by: David Laight <david.laight.linux@...il.com>

Reviewed-by: Nicolas Pitre <npitre@...libre.com>


> ---
> 
> Changes for v4:
> - merge in patch 8.
> - Add comments about gcc being 'broken' for mixed 32/64 bit maths.
>   clang doesn't have the same issues.
> - use a #define for mul_add() to avoid 'defined but not used'
>   errors.
> 
>  arch/x86/include/asm/div64.h | 19 +++++++++++++++++
>  include/linux/math64.h       | 11 ++++++++++
>  lib/math/div64.c             | 40 +++++++++++++++++++++++-------------
>  3 files changed, 56 insertions(+), 14 deletions(-)
> 
> diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
> index cabdc2d5a68f..a18c045aa8a1 100644
> --- a/arch/x86/include/asm/div64.h
> +++ b/arch/x86/include/asm/div64.h
> @@ -60,6 +60,12 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
>  }
>  #define div_u64_rem	div_u64_rem
>  
> +/*
> + * gcc tends to zero extend 32bit values and do full 64bit maths.
> + * Define asm functions that avoid this.
> + * (clang generates better code for the C versions.)
> + */
> +#ifndef __clang__
>  static inline u64 mul_u32_u32(u32 a, u32 b)
>  {
>  	u32 high, low;
> @@ -71,6 +77,19 @@ static inline u64 mul_u32_u32(u32 a, u32 b)
>  }
>  #define mul_u32_u32 mul_u32_u32
>  
> +static inline u64 add_u64_u32(u64 a, u32 b)
> +{
> +	u32 high = a >> 32, low = a;
> +
> +	asm ("addl %[b], %[low]; adcl $0, %[high]"
> +		: [low] "+r" (low), [high] "+r" (high)
> +		: [b] "rm" (b) );
> +
> +	return low | (u64)high << 32;
> +}
> +#define add_u64_u32 add_u64_u32
> +#endif
> +
>  /*
>   * __div64_32() is never called on x86, so prevent the
>   * generic definition from getting built.
> diff --git a/include/linux/math64.h b/include/linux/math64.h
> index e889d850b7f1..cc305206d89f 100644
> --- a/include/linux/math64.h
> +++ b/include/linux/math64.h
> @@ -158,6 +158,17 @@ static inline u64 mul_u32_u32(u32 a, u32 b)
>  }
>  #endif
>  
> +#ifndef add_u64_u32
> +/*
> + * Many a GCC version also messes this up, zero extending b
> + * and then spilling everything to the stack.
> + */
> +static inline u64 add_u64_u32(u64 a, u32 b)
> +{
> +	return a + b;
> +}
> +#endif
> +
>  #if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
>  
>  #ifndef mul_u64_u32_shr
> diff --git a/lib/math/div64.c b/lib/math/div64.c
> index f92e7160feb6..f6da7b5fb69e 100644
> --- a/lib/math/div64.c
> +++ b/lib/math/div64.c
> @@ -186,33 +186,45 @@ EXPORT_SYMBOL(iter_div_u64_rem);
>  #endif
>  
>  #if !defined(mul_u64_add_u64_div_u64) || defined(test_mul_u64_add_u64_div_u64)
> -u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d)
> -{
> +
> +#define mul_add(a, b, c) add_u64_u32(mul_u32_u32(a, b), c)
> +
>  #if defined(__SIZEOF_INT128__) && !defined(test_mul_u64_add_u64_div_u64)
>  
> +static inline u64 mul_u64_u64_add_u64(u64 *p_lo, u64 a, u64 b, u64 c)
> +{
>  	/* native 64x64=128 bits multiplication */
>  	u128 prod = (u128)a * b + c;
> -	u64 n_lo = prod, n_hi = prod >> 64;
> +
> +	*p_lo = prod;
> +	return prod >> 64;
> +}
>  
>  #else
>  
> -	/* perform a 64x64=128 bits multiplication manually */
> -	u32 a_lo = a, a_hi = a >> 32, b_lo = b, b_hi = b >> 32;
> +static inline u64 mul_u64_u64_add_u64(u64 *p_lo, u64 a, u64 b, u64 c)
> +{
> +	/* perform a 64x64=128 bits multiplication in 32bit chunks */
>  	u64 x, y, z;
>  
>  	/* Since (x-1)(x-1) + 2(x-1) == x.x - 1 two u32 can be added to a u64 */
> -	x = (u64)a_lo * b_lo + (u32)c;
> -	y = (u64)a_lo * b_hi + (u32)(c >> 32);
> -	y += (u32)(x >> 32);
> -	z = (u64)a_hi * b_hi + (u32)(y >> 32);
> -	y = (u64)a_hi * b_lo + (u32)y;
> -	z += (u32)(y >> 32);
> -	x = (y << 32) + (u32)x;
> -
> -	u64 n_lo = x, n_hi = z;
> +	x = mul_add(a, b, c);
> +	y = mul_add(a, b >> 32, c >> 32);
> +	y = add_u64_u32(y, x >> 32);
> +	z = mul_add(a >> 32, b >> 32, y >> 32);
> +	y = mul_add(a >> 32, b, y);
> +	*p_lo = (y << 32) + (u32)x;
> +	return add_u64_u32(z, y >> 32);
> +}
>  
>  #endif
>  
> +u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d)
> +{
> +	u64 n_lo, n_hi;
> +
> +	n_hi = mul_u64_u64_add_u64(&n_lo, a, b, c);
> +
>  	if (unlikely(n_hi >= d)) {
>  		/* trigger runtime exception if divisor is zero */
>  		if (d == 0) {
> -- 
> 2.39.5
> 
> 

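FWIW, a standalone harness for cross-checking the 32-bit chunk path
against the compiler's native 128-bit arithmetic could look like the
sketch below (plain C stand-ins for the asm helpers, not kernel code;
assumes a host gcc or clang with unsigned __int128):

	/* Cross-check the patch's 32-bit-chunk 64x64+64 -> 128 multiply
	 * against unsigned __int128 on a handful of corner values.
	 */
	#include <stdint.h>
	#include <stdio.h>

	typedef uint32_t u32;
	typedef uint64_t u64;

	/* Plain C equivalents of the helpers; the u32 parameters do the
	 * truncation that the kernel code relies on. */
	static u64 mul_u32_u32(u32 a, u32 b) { return (u64)a * b; }
	static u64 add_u64_u32(u64 a, u32 b) { return a + b; }

	#define mul_add(a, b, c) add_u64_u32(mul_u32_u32(a, b), c)

	/* Same structure as the !__SIZEOF_INT128__ path in the patch. */
	static u64 mul_u64_u64_add_u64(u64 *p_lo, u64 a, u64 b, u64 c)
	{
		u64 x, y, z;

		x = mul_add(a, b, c);
		y = mul_add(a, b >> 32, c >> 32);
		y = add_u64_u32(y, x >> 32);
		z = mul_add(a >> 32, b >> 32, y >> 32);
		y = mul_add(a >> 32, b, y);
		*p_lo = (y << 32) + (u32)x;
		return add_u64_u32(z, y >> 32);
	}

	int main(void)
	{
		static const u64 vals[] = {
			0, 1, 0xffffffffull, 0xffffffffffffffffull,
			0x123456789abcdef0ull,
		};
		int n = sizeof(vals) / sizeof(vals[0]);

		for (int i = 0; i < n; i++)
		for (int j = 0; j < n; j++)
		for (int k = 0; k < n; k++) {
			u64 a = vals[i], b = vals[j], c = vals[k], lo;
			unsigned __int128 ref = (unsigned __int128)a * b + c;
			u64 hi = mul_u64_u64_add_u64(&lo, a, b, c);

			if (lo != (u64)ref || hi != (u64)(ref >> 64)) {
				printf("mismatch: a=%llx b=%llx c=%llx\n",
				       (unsigned long long)a,
				       (unsigned long long)b,
				       (unsigned long long)c);
				return 1;
			}
		}
		printf("all corner cases match\n");
		return 0;
	}

Not exhaustive of course, but the 0/1/2^32-1/2^64-1 inputs should
exercise the interesting carry paths in every term.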