lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Date:   Fri, 5 May 2017 18:58:23 +0200
From:   Borislav Petkov <bp@...en8.de>
To:     Alexey Dobriyan <adobriyan@...il.com>
Cc:     x86@...nel.org, tglx@...utronix.de, mingo@...hat.com,
        hpa@...or.com, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 3/5] x86_64: rename clear_page() and copy_user() variants

On Wed, Apr 26, 2017 at 09:30:47PM +0300, Alexey Dobriyan wrote:
> The patch changes marketing-style acronyms like ERMS and chatty names
> to consistent, shorter versions:
> 
> 	xxx_mov
> 	xxx_rep_stosq	xxx_rep_movsq
> 	xxx_rep_stosb	xxx_rep_movsb
> 
> Signed-off-by: Alexey Dobriyan <adobriyan@...il.com>
> ---
> 
>  arch/x86/include/asm/page_64.h    |   12 ++++++------
>  arch/x86/include/asm/uaccess_64.h |   18 +++++++++---------
>  arch/x86/lib/clear_page_64.S      |   18 +++++++++---------
>  arch/x86/lib/copy_user_64.S       |   20 ++++++++++----------
>  tools/perf/ui/browsers/annotate.c |    2 +-
>  5 files changed, 35 insertions(+), 35 deletions(-)
> 
> --- a/arch/x86/include/asm/page_64.h
> +++ b/arch/x86/include/asm/page_64.h
> @@ -35,15 +35,15 @@ extern unsigned long __phys_addr_symbol(unsigned long);
>  #define pfn_valid(pfn)          ((pfn) < max_pfn)
>  #endif
>  
> -void clear_page_orig(void *page);
> -void clear_page_rep(void *page);
> -void clear_page_erms(void *page);
> +void clear_page_mov(void *page);
> +void clear_page_rep_stosq(void *page);
> +void clear_page_rep_stosb(void *page);
>  
>  static inline void clear_page(void *page)
>  {
> -	alternative_call_2(clear_page_orig,
> -			   clear_page_rep, X86_FEATURE_REP_GOOD,
> -			   clear_page_erms, X86_FEATURE_ERMS,
> +	alternative_call_2(clear_page_mov,
> +			   clear_page_rep_stosq, X86_FEATURE_REP_GOOD,
> +			   clear_page_rep_stosb, X86_FEATURE_ERMS,
>  			   "=D" (page),
>  			   "0" (page)
>  			   : "memory", "rax", "rcx");
> --- a/arch/x86/include/asm/uaccess_64.h
> +++ b/arch/x86/include/asm/uaccess_64.h
> @@ -18,11 +18,11 @@
>  
>  /* Handles exceptions in both to and from, but doesn't do access_ok */
>  __must_check unsigned long
> -copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
> +copy_user_rep_movsb(void *to, const void *from, unsigned len);

WARNING: Prefer 'unsigned int' to bare use of 'unsigned'
#62: FILE: arch/x86/include/asm/uaccess_64.h:21:
+copy_user_rep_movsb(void *to, const void *from, unsigned len);

Pls convert them while at it.

>  __must_check unsigned long
> -copy_user_generic_string(void *to, const void *from, unsigned len);
> +copy_user_rep_movsq(void *to, const void *from, unsigned len);
>  __must_check unsigned long
> -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
> +copy_user_mov(void *to, const void *from, unsigned len);
>  
>  static __always_inline __must_check unsigned long
>  copy_user_generic(void *to, const void *from, unsigned len)
> @@ -30,14 +30,14 @@ copy_user_generic(void *to, const void *from, unsigned len)
>  	unsigned ret;
>  
>  	/*
> -	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
> -	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
> -	 * Otherwise, use copy_user_generic_unrolled.
> +	 * If CPU has ERMS feature, use copy_user_rep_movsb.
> +	 * Otherwise, if CPU has rep_good feature, use copy_user_rep_movsq.

REP_GOOD, while you're at it. Also, end function names with ().

> +	 * Otherwise, use copy_user_mov.
>  	 */
> -	alternative_call_2(copy_user_generic_unrolled,
> -			 copy_user_generic_string,
> +	alternative_call_2(copy_user_mov,
> +			 copy_user_rep_movsq,
>  			 X86_FEATURE_REP_GOOD,
> -			 copy_user_enhanced_fast_string,
> +			 copy_user_rep_movsb,
>  			 X86_FEATURE_ERMS,
>  			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
>  				     "=d" (len)),
> --- a/arch/x86/lib/clear_page_64.S
> +++ b/arch/x86/lib/clear_page_64.S
> @@ -14,15 +14,15 @@
>   * Zero a page.
>   * %rdi	- page
>   */
> -ENTRY(clear_page_rep)
> +ENTRY(clear_page_rep_stosq)
>  	movl $4096/8,%ecx
>  	xorl %eax,%eax
>  	rep stosq
>  	ret
> -ENDPROC(clear_page_rep)
> -EXPORT_SYMBOL_GPL(clear_page_rep)
> +ENDPROC(clear_page_rep_stosq)
> +EXPORT_SYMBOL_GPL(clear_page_rep_stosq)
>  
> -ENTRY(clear_page_orig)
> +ENTRY(clear_page_mov)
>  	xorl   %eax,%eax
>  	movl   $4096/64,%ecx
>  	.p2align 4
> @@ -41,13 +41,13 @@ ENTRY(clear_page_orig)
>  	jnz	.Lloop
>  	nop
>  	ret
> -ENDPROC(clear_page_orig)
> -EXPORT_SYMBOL_GPL(clear_page_orig)
> +ENDPROC(clear_page_mov)
> +EXPORT_SYMBOL_GPL(clear_page_mov)

Same issue as with the previous patch: _orig was a poor name, but since
you're changing the names anyway, please change them to something more
descriptive.

> -ENTRY(clear_page_erms)
> +ENTRY(clear_page_rep_stosb)
>  	movl $4096,%ecx
>  	xorl %eax,%eax
>  	rep stosb
>  	ret
> -ENDPROC(clear_page_erms)
> -EXPORT_SYMBOL_GPL(clear_page_erms)
> +ENDPROC(clear_page_rep_stosb)
> +EXPORT_SYMBOL_GPL(clear_page_rep_stosb)
> --- a/arch/x86/lib/copy_user_64.S
> +++ b/arch/x86/lib/copy_user_64.S
> @@ -17,7 +17,7 @@
>  #include <asm/export.h>
>  
>  /*
> - * copy_user_generic_unrolled - memory copy with exception handling.
> + * copy_user_mov - memory copy with exception handling.

This rename is actually losing information from the function name:
"generic_unrolled" explains exactly what the function does.

>   * This version is for CPUs like P4 that don't have efficient micro
>   * code for rep movsq
>   *

-- 
Regards/Gruss,
    Boris.

Good mailing practices for 400: avoid top-posting and trim the reply.

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ