Message-ID: <20180620130624.GC2530@hirez.programming.kicks-ass.net>
Date:   Wed, 20 Jun 2018 15:06:24 +0200
From:   Peter Zijlstra <peterz@...radead.org>
To:     Nadav Amit <namit@...are.com>
Cc:     linux-kernel@...r.kernel.org, x86@...nel.org,
        Thomas Gleixner <tglx@...utronix.de>,
        Ingo Molnar <mingo@...hat.com>,
        "H. Peter Anvin" <hpa@...or.com>,
        Greg Kroah-Hartman <gregkh@...uxfoundation.org>,
        Kate Stewart <kstewart@...uxfoundation.org>,
        Philippe Ombredanne <pombredanne@...b.com>
Subject: Re: [PATCH v5 9/9] x86: jump-labels: use macros instead of inline
 assembly

On Tue, Jun 19, 2018 at 12:48:54PM -0700, Nadav Amit wrote:
> diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
> index 8c0de4282659..f321a50e6727 100644
> --- a/arch/x86/include/asm/jump_label.h
> +++ b/arch/x86/include/asm/jump_label.h
> @@ -108,6 +99,26 @@ struct jump_entry {
>  	.popsection
>  .endm
>  
> +.macro STATIC_BRANCH_NOP l_yes:req key:req branch:req
> +1:
> +	.byte STATIC_KEY_INIT_NOP
> +	.pushsection __jump_table, "aw"
> +	_ASM_ALIGN
> +	_ASM_PTR 1b, \l_yes, \key + \branch
> +	.popsection
> +.endm
> +
> +.macro STATIC_BRANCH_JMP l_yes:req key:req branch:req
> +1:
> +	.byte 0xe9
> +	.long \l_yes - 2f
> +2:
> +	.pushsection __jump_table, "aw"
> +	_ASM_ALIGN
> +	_ASM_PTR 1b, \l_yes, \key + \branch
> +	.popsection
> +.endm
> +
>  #endif	/* __ASSEMBLY__ */
>  
>  #endif


This also allows the below. The old macros were slightly easier to
understand, but they were not in wide use (in fact, there was only a
single user), so I see no point in preserving them.
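
(Purely for illustration, not part of the patch: a rough sketch of what
a STATIC_BRANCH_JMP invocation expands to, going by the macro quoted
above. 0xe9 is the opcode of a near jmp with a 32-bit displacement, and
the __jump_table entry records the code address, the jump target and
the key, with the branch argument (0 or 1) ending up in the low bit of
the key pointer. Label and symbol names below are only for the
example.)

	/* illustrative expansion of:
	 *   STATIC_BRANCH_JMP .Lafter_call, context_tracking_enabled, 1
	 */
	1:
		.byte	0xe9			/* opcode: jmp rel32 */
		.long	.Lafter_call - 2f	/* displacement, relative to 2: */
	2:
		.pushsection __jump_table, "aw"
		_ASM_ALIGN
		/* entry: code address, jump target, key + branch bit */
		_ASM_PTR 1b, .Lafter_call, context_tracking_enabled + 1
		.popsection

With def=0 the old STATIC_JUMP_IF_FALSE emitted the same jmp.d32 and the
same \key + 1 table entry, so the converted call site below should end
up generating the same code as before.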

---
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -67,38 +67,6 @@ struct jump_entry {
 
 #else	/* __ASSEMBLY__ */
 
-.macro STATIC_JUMP_IF_TRUE target, key, def
-.Lstatic_jump_\@:
-	.if \def
-	/* Equivalent to "jmp.d32 \target" */
-	.byte		0xe9
-	.long		\target - .Lstatic_jump_after_\@
-.Lstatic_jump_after_\@:
-	.else
-	.byte		STATIC_KEY_INIT_NOP
-	.endif
-	.pushsection __jump_table, "aw"
-	_ASM_ALIGN
-	_ASM_PTR	.Lstatic_jump_\@, \target, \key
-	.popsection
-.endm
-
-.macro STATIC_JUMP_IF_FALSE target, key, def
-.Lstatic_jump_\@:
-	.if \def
-	.byte		STATIC_KEY_INIT_NOP
-	.else
-	/* Equivalent to "jmp.d32 \target" */
-	.byte		0xe9
-	.long		\target - .Lstatic_jump_after_\@
-.Lstatic_jump_after_\@:
-	.endif
-	.pushsection __jump_table, "aw"
-	_ASM_ALIGN
-	_ASM_PTR	.Lstatic_jump_\@, \target, \key + 1
-	.popsection
-.endm
-
 .macro STATIC_BRANCH_NOP l_yes:req key:req branch:req
 1:
 	.byte STATIC_KEY_INIT_NOP
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 352e70cd33e8..7c0d2ace8839 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -338,7 +338,7 @@ For 32-bit we have the following conventions - kernel is built with
 .macro CALL_enter_from_user_mode
 #ifdef CONFIG_CONTEXT_TRACKING
 #ifdef HAVE_JUMP_LABEL
-	STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0
+	STATIC_BRANCH_JMP .Lafter_call_\@, context_tracking_enabled, 1
 #endif
 	call enter_from_user_mode
 .Lafter_call_\@:
