lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:   Mon, 1 Apr 2019 12:12:16 -0700
From:   "Paul E. McKenney" <paulmck@...ux.ibm.com>
To:     Alexander Potapenko <glider@...gle.com>
Cc:     hpa@...or.com, peterz@...radead.org, linux-kernel@...r.kernel.org,
        dvyukov@...gle.com, jyknight@...gle.com, x86@...nel.org,
        mingo@...hat.com
Subject: Re: [PATCH] x86/asm: use memory clobber in bitops that touch
 arbitrary memory

On Mon, Apr 01, 2019 at 06:24:08PM +0200, Alexander Potapenko wrote:
> Certain bit operations that read/write bits take a base pointer and an
> arbitrarily large offset to address the bit relative to that base.
> Inline assembly constraints aren't expressive enough to tell the
> compiler that the assembly directive is going to touch a specific memory
> location of unknown size, therefore we have to use the "memory" clobber
> to indicate that the assembly is going to access memory locations other
> than those listed in the inputs/outputs.
> 
> This particular patch leads to size increase of 124 kernel functions in
> a defconfig build. For some of them the diff is in NOP operations, others
> end up re-reading values from memory and may potentially slow down the
> execution. But without these clobbers the compiler is free to cache
> the contents of the bitmaps and use them as if they weren't changed by
> the inline assembly.
> 
> Signed-off-by: Alexander Potapenko <glider@...gle.com>
> Cc: Dmitry Vyukov <dvyukov@...gle.com>
> Cc: Paul E. McKenney <paulmck@...ux.ibm.com>
> Cc: H. Peter Anvin <hpa@...or.com>
> Cc: Peter Zijlstra <peterz@...radead.org>
> Cc: James Y Knight <jyknight@...gle.com>

Reviewed-by: Paul E. McKenney <paulmck@...ux.ibm.com>

> ---
>  arch/x86/include/asm/bitops.h | 14 +++++++-------
>  1 file changed, 7 insertions(+), 7 deletions(-)
> 
> diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
> index d153d570bb04..20e4950827d9 100644
> --- a/arch/x86/include/asm/bitops.h
> +++ b/arch/x86/include/asm/bitops.h
> @@ -111,7 +111,7 @@ clear_bit(long nr, volatile unsigned long *addr)
>  	} else {
>  		asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
>  			: BITOP_ADDR(addr)
> -			: "Ir" (nr));
> +			: "Ir" (nr) : "memory");
>  	}
>  }
>  
> @@ -131,7 +131,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad
>  
>  static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
>  {
> -	asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr));
> +	asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr) : "memory");
>  }
>  
>  static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
> @@ -176,7 +176,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *
>   */
>  static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
>  {
> -	asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr));
> +	asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr) : "memory");
>  }
>  
>  /**
> @@ -197,7 +197,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
>  	} else {
>  		asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
>  			: BITOP_ADDR(addr)
> -			: "Ir" (nr));
> +			: "Ir" (nr) : "memory");
>  	}
>  }
>  
> @@ -243,7 +243,7 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
>  	asm(__ASM_SIZE(bts) " %2,%1"
>  	    CC_SET(c)
>  	    : CC_OUT(c) (oldbit), ADDR
> -	    : "Ir" (nr));
> +	    : "Ir" (nr) : "memory");
>  	return oldbit;
>  }
>  
> @@ -283,7 +283,7 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long
>  	asm volatile(__ASM_SIZE(btr) " %2,%1"
>  		     CC_SET(c)
>  		     : CC_OUT(c) (oldbit), ADDR
> -		     : "Ir" (nr));
> +		     : "Ir" (nr) : "memory");
>  	return oldbit;
>  }
>  
> @@ -326,7 +326,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l
>  	asm volatile(__ASM_SIZE(bt) " %2,%1"
>  		     CC_SET(c)
>  		     : CC_OUT(c) (oldbit)
> -		     : "m" (*(unsigned long *)addr), "Ir" (nr));
> +		     : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");
>  
>  	return oldbit;
>  }
> -- 
> 2.21.0.392.gf8f6787159e-goog
> 

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ