Date:   Fri, 28 Jun 2019 12:21:21 +0200
From:   Peter Zijlstra <peterz@...radead.org>
To:     x86@...nel.org, peterz@...radead.org, linux-kernel@...r.kernel.org
Cc:     Josh Poimboeuf <jpoimboe@...hat.com>,
        Steven Rostedt <rostedt@...dmis.org>,
        Masami Hiramatsu <mhiramat@...nel.org>,
        Daniel Bristot de Oliveira <bristot@...hat.com>,
        Jason Baron <jbaron@...mai.com>, Nadav Amit <namit@...are.com>,
        Andy Lutomirski <luto@...nel.org>,
        Eugeniy Paltsev <Eugeniy.Paltsev@...opsys.com>,
        Vineet Gupta <Vineet.Gupta1@...opsys.com>
Subject: [RFC][PATCH 8/8] jump_label, x86: Enable JMP8/NOP2 support

Enable and emit short JMP/NOP jump_label entries.

Many thanks to Josh for (re)discovering the .skip trick to
conditionally emit variable-length text.
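
For reference, the trick works because GAS evaluates comparison
operators to -1 for true and 0 for false, so negating a comparison
yields a 0-or-1 repeat count for .skip. A minimal stand-alone sketch
(not from this patch; it uses a backward label so the .skip
expressions are trivially absolute, while the patch relies on the
forward %l[l_yes] label resolving within the same section):

	.text
target:					/* hypothetical jump target */
	nop
1:
	.set disp, target - (1b + 2)	/* rel8 counts from the end of a 2-byte insn */
	.set sign, disp >> 31
	.set res,  (disp >> 7) ^ sign	/* 0 iff disp fits in a signed byte */
	.set is_byte, -(res == 0)	/* GAS: (res == 0) is -1 or 0 */
	.set is_long, -(res != 0)

	.skip is_byte, 0x66		/* 2-byte NOP: 66 90 */
	.skip is_byte, 0x90
	.skip is_long, 0x0f		/* 5-byte NOP: 0f 1f 44 00 00 */
	.skip is_long, 0x1f
	.skip is_long, 0x44
	.skip is_long, 0x00
	.skip is_long, 0x00

Assembling this and disassembling the output shows a single 66 90
NOP2, since target is only 3 bytes away.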

Due to how early we enable jump_labels on x86, if any of this comes
apart, the machine is completely dead. Qemu+GDB saved the day this
time.

Signed-off-by: Peter Zijlstra (Intel) <peterz@...radead.org>
---
 arch/x86/include/asm/jump_label.h |   37 +++++++++++++++++++++++++++++++------
 arch/x86/kernel/jump_label.c      |    5 ++++-
 2 files changed, 35 insertions(+), 7 deletions(-)

--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -31,7 +31,35 @@
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("1:"
-		".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
+
+		".set disp, %l[l_yes] - (1b + 2) \n\t"
+		".set sign, disp >> 31 \n\t"
+		".set res, (disp >> 7) ^ sign \n\t"
+		".set is_byte, -(res == 0) \n\t"
+		".set is_long, -(res != 0) \n\t"
+
+#ifdef CONFIG_X86_64
+		".skip is_byte, 0x66 \n\t"
+		".skip is_byte, 0x90 \n\t"
+#else
+		".skip is_byte, 0x89 \n\t"
+		".skip is_byte, 0xf6 \n\t"
+#endif
+
+#ifdef CONFIG_X86_64
+		".skip is_long, 0x0f \n\t"
+		".skip is_long, 0x1f \n\t"
+		".skip is_long, 0x44 \n\t"
+		".skip is_long, 0x00 \n\t"
+		".skip is_long, 0x00 \n\t"
+#else
+		".skip is_long, 0x3e \n\t"
+		".skip is_long, 0x8d \n\t"
+		".skip is_long, 0x74 \n\t"
+		".skip is_long, 0x26 \n\t"
+		".skip is_long, 0x00 \n\t"
+#endif
+
 		JUMP_TABLE_ENTRY
 		: :  "i" (key), "i" (branch) : : l_yes);
 
@@ -43,8 +71,7 @@ static __always_inline bool arch_static_
 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("1:"
-		".byte 0xe9 \n\t"
-		".long %l[l_yes] - (. + 4) \n\t"
+		"jmp %l[l_yes] \n\t"
 		JUMP_TABLE_ENTRY
 		: :  "i" (key), "i" (branch) : : l_yes);
 
@@ -59,9 +86,7 @@ extern int arch_jump_entry_size(struct j
 
 .macro STATIC_BRANCH_FALSE_LIKELY target, key
 .Lstatic_jump_\@:
-	/* Equivalent to "jmp.d32 \target" */
-	.byte		0xe9
-	.long		\target - (. + 4)
+	jmp \target
 
 	.pushsection __jump_table, "aw"
 	_ASM_ALIGN
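
Note that on the jump_label.h side the instruction choice is now left
entirely to the assembler: a bare jmp is subject to branch relaxation,
assembling to the 2-byte EB rel8 form when the target is within rel8
range (-128..+127 from the end of the instruction) and to the 5-byte
E9 rel32 form otherwise. A stand-alone illustration (not from the
patch):

	.text
	jmp 1f			/* target in range: assembles to EB rel8, 2 bytes */
	.skip 8, 0x90
1:
	jmp 2f			/* target out of rel8 range: E9 rel32, 5 bytes */
	.skip 200, 0x90
2:
	nop

Which form the assembler picked at each site is then recovered with
the same displacement test, via __jump_disp_is_byte() below.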
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -29,7 +29,10 @@ union jump_code_union {
 
 static inline bool __jump_disp_is_byte(s32 disp)
 {
-	return false;
+	s32 sign;
+	disp -= JMP8_INSN_SIZE;
+	sign = disp >> 31;
+	return ((disp >> 7) ^ sign) == 0;
 }
 
 int arch_jump_entry_size(struct jump_entry *entry)
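
For completeness, the new displacement test is easy to check in
isolation. A stand-alone userspace sketch of the same logic (the
function name and the JMP8_INSN_SIZE value of 2 are assumptions here;
disp appears to be passed relative to the start of the instruction, so
the helper first subtracts JMP8_INSN_SIZE to make it relative to the
end of the 2-byte JMP, matching the asm side's %l[l_yes] - (1b + 2)):

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	#define JMP8_INSN_SIZE 2	/* assumed: EB opcode + rel8 */

	/*
	 * After the adjustment, every bit above bit 7 must equal the
	 * sign bit, i.e. the displacement is representable as a
	 * signed 8-bit immediate.
	 */
	static bool jump_disp_is_byte(int32_t disp)
	{
		int32_t sign;

		disp -= JMP8_INSN_SIZE;
		sign = disp >> 31;	/* 0 or -1, arithmetic shift */
		return ((disp >> 7) ^ sign) == 0;
	}

	int main(void)
	{
		/* rel8 range is [-128, 127] from the end of the JMP8 */
		assert( jump_disp_is_byte(-128 + JMP8_INSN_SIZE));
		assert( jump_disp_is_byte( 127 + JMP8_INSN_SIZE));
		assert(!jump_disp_is_byte(-129 + JMP8_INSN_SIZE));
		assert(!jump_disp_is_byte( 128 + JMP8_INSN_SIZE));
		return 0;
	}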

