Message-Id: <606e3af9f66b1d27f7a9885d9d6171b2585c2fe4.1541711457.git.jpoimboe@redhat.com>
Date:   Thu,  8 Nov 2018 15:15:52 -0600
From:   Josh Poimboeuf <jpoimboe@...hat.com>
To:     linux-kernel@...r.kernel.org
Cc:     x86@...nel.org, Ard Biesheuvel <ard.biesheuvel@...aro.org>,
        Andy Lutomirski <luto@...nel.org>,
        Steven Rostedt <rostedt@...dmis.org>,
        Peter Zijlstra <peterz@...radead.org>,
        Ingo Molnar <mingo@...nel.org>,
        Thomas Gleixner <tglx@...utronix.de>,
        Linus Torvalds <torvalds@...ux-foundation.org>,
        Masami Hiramatsu <mhiramat@...nel.org>,
        Jason Baron <jbaron@...mai.com>, Jiri Kosina <jkosina@...e.cz>,
        David Laight <David.Laight@...LAB.COM>,
        Borislav Petkov <bp@...en8.de>
Subject: [RFC PATCH 2/3] x86/static_call: Add x86 unoptimized static call implementation

Add the x86 unoptimized static call implementation.  For each key, it
creates a permanent trampoline which serves as the destination for all
static calls associated with that key.  The trampoline has a direct jump
which gets patched by static_call_update() when the call destination
changes.
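
To illustrate how a caller is expected to use this, here is a minimal
usage sketch.  It assumes the generic DEFINE_STATIC_CALL() /
static_call() / static_call_update() interface introduced in patch 1/3;
the exact macro spellings there may differ from what is shown here:

  /*
   * Illustrative only, not part of this patch.  Macro spellings are
   * assumed from the generic API in patch 1/3.
   */
  #include <linux/static_call.h>

  static int func_a(int arg)
  {
  	return arg + 1;
  }

  static int func_b(int arg)
  {
  	return arg - 1;
  }

  /* Emits the my_key trampoline, initially jumping to func_a: */
  DEFINE_STATIC_CALL(my_key, func_a);

  int example(void)
  {
  	int ret;

  	/* Calls func_a through the my_key trampoline's direct jmp: */
  	ret = static_call(my_key)(1);

  	/* Repatch the trampoline's jmp to point at func_b: */
  	static_call_update(my_key, func_b);

  	/* Subsequent calls through the trampoline now reach func_b: */
  	ret += static_call(my_key)(1);

  	return ret;
  }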

Signed-off-by: Josh Poimboeuf <jpoimboe@...hat.com>
---
 arch/x86/Kconfig                   |  1 +
 arch/x86/include/asm/static_call.h | 18 +++++++++++
 arch/x86/kernel/Makefile           |  1 +
 arch/x86/kernel/static_call.c      | 51 ++++++++++++++++++++++++++++++
 4 files changed, 71 insertions(+)
 create mode 100644 arch/x86/include/asm/static_call.h
 create mode 100644 arch/x86/kernel/static_call.c

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index b5286ad2a982..9a83c3edd839 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -189,6 +189,7 @@ config X86
 	select HAVE_FUNCTION_ARG_ACCESS_API
 	select HAVE_STACKPROTECTOR		if CC_HAS_SANE_STACKPROTECTOR
 	select HAVE_STACK_VALIDATION		if X86_64
+	select HAVE_STATIC_CALL_UNOPTIMIZED
 	select HAVE_RSEQ
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_UNSTABLE_SCHED_CLOCK
diff --git a/arch/x86/include/asm/static_call.h b/arch/x86/include/asm/static_call.h
new file mode 100644
index 000000000000..de6b032cf809
--- /dev/null
+++ b/arch/x86/include/asm/static_call.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_STATIC_CALL_H
+#define _ASM_STATIC_CALL_H
+
+/*
+ * This is a permanent trampoline which is the destination for all static calls
+ * for the given key.  The direct jump gets patched by static_call_update().
+ */
+#define ARCH_STATIC_CALL_TRAMP(key, func)				\
+	asm(".pushsection .text, \"ax\"				\n"	\
+	    ".align 4						\n"	\
+	    ".globl " STATIC_CALL_TRAMP_STR(key) "		\n"	\
+	    ".type " STATIC_CALL_TRAMP_STR(key) ", @function	\n"	\
+	    STATIC_CALL_TRAMP_STR(key) ":			\n"	\
+	    "jmp " #func "					\n"	\
+	    ".popsection					\n")
+
+#endif /* _ASM_STATIC_CALL_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 8824d01c0c35..82acc8a28429 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -62,6 +62,7 @@ obj-y			+= tsc.o tsc_msr.o io_delay.o rtc.o
 obj-y			+= pci-iommu_table.o
 obj-y			+= resource.o
 obj-y			+= irqflags.o
+obj-y			+= static_call.o
 
 obj-y				+= process.o
 obj-y				+= fpu/
diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
new file mode 100644
index 000000000000..47ddc655ccda
--- /dev/null
+++ b/arch/x86/kernel/static_call.c
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/static_call.h>
+#include <linux/memory.h>
+#include <linux/bug.h>
+#include <asm/text-patching.h>
+#include <asm/nospec-branch.h>
+
+#define CALL_INSN_SIZE 5
+
+void static_call_bp_handler(void);
+void *bp_handler_dest;
+
+asm(".pushsection .text, \"ax\"						\n"
+    ".globl static_call_bp_handler					\n"
+    ".type static_call_bp_handler, @function				\n"
+    "static_call_bp_handler:						\n"
+    "ANNOTATE_RETPOLINE_SAFE						\n"
+    "jmp *bp_handler_dest						\n"
+    ".popsection							\n");
+
+void arch_static_call_transform(unsigned long insn, void *dest)
+{
+	s32 dest_relative;
+	unsigned char insn_opcode;
+	unsigned char opcodes[CALL_INSN_SIZE];
+
+	mutex_lock(&text_mutex);
+
+	insn_opcode = *(unsigned char *)insn;
+	if (insn_opcode != 0xe8 && insn_opcode != 0xe9) {
+		WARN_ONCE(1, "unexpected static call insn opcode 0x%x at %pS",
+			  insn_opcode, (void *)insn);
+		goto done;
+	}
+
+	dest_relative = (long)(dest) - (long)(insn + CALL_INSN_SIZE);
+
+	opcodes[0] = insn_opcode;
+	memcpy(&opcodes[1], &dest_relative, CALL_INSN_SIZE - 1);
+
+	/* Set up the variable for the breakpoint handler: */
+	bp_handler_dest = dest;
+
+	/* Patch the call site: */
+	text_poke_bp((void *)insn, opcodes, CALL_INSN_SIZE,
+		     static_call_bp_handler);
+
+done:
+	mutex_unlock(&text_mutex);
+}
+EXPORT_SYMBOL_GPL(arch_static_call_transform);
-- 
2.17.2
