Message-Id: <20180115064742.25324-3-alankao@andestech.com>
Date:   Mon, 15 Jan 2018 14:47:38 +0800
From:   Alan Kao <nonerkao@...il.com>
To:     Palmer Dabbelt <palmer@...ive.com>, Albert Ou <albert@...ive.com>,
        Christoph Hellwig <hch@....de>,
        Steven Rostedt <rostedt@...dmis.org>,
        Ingo Molnar <mingo@...hat.com>,
        Masahiro Yamada <yamada.masahiro@...ionext.com>,
        Kamil Rytarowski <n54@....com>,
        Andrew Morton <akpm@...ux-foundation.org>,
        patches@...ups.riscv.org, linux-kernel@...r.kernel.org
Cc:     Alan Kao <alankao@...estech.com>,
        Greentime Hu <greentime@...estech.com>
Subject: [PATCH v2 2/6] riscv/ftrace: Add dynamic function tracer support

We now have dynamic ftrace with the following added items:

* ftrace_make_call, ftrace_make_nop (in kernel/ftrace.c)
  These two functions turn any recorded call site of a filtered
  function into either a call to ftrace_caller or nops.

* ftrace_update_ftrace_func (in kernel/ftrace.c)
  Turns the nops at ftrace_call into a call to a generic entry for
  function tracers.

* ftrace_caller (in kernel/mcount-dyn.S)
  The entry that each _mcount call site jumps to once it is filtered
  to be traced.

This patch also fixes the semantic problems in mcount.S, which will be
treated as only a reference implementation once we have dynamic
ftrace.
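
To illustrate (a sketch; the exact encodings are produced by the
to_auipc_insn/to_jalr_insn macros added below), each -pg call site
toggles between two 8-byte states:

    disabled:               enabled:
    addi x0, x0, 0 (nop)    auipc ra, high20(offset)
    addi x0, x0, 0 (nop)    jalr  ra, low12(offset)(ra)

where offset is the patch target (e.g. ftrace_caller) minus the
call-site address.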

Cc: Greentime Hu <greentime@...estech.com>
Signed-off-by: Alan Kao <alankao@...estech.com>
---
 arch/riscv/Kconfig              |   1 +
 arch/riscv/include/asm/ftrace.h |  45 ++++++++++++++++++
 arch/riscv/kernel/Makefile      |   5 +-
 arch/riscv/kernel/ftrace.c      | 100 +++++++++++++++++++++++++++++++++++++++-
 arch/riscv/kernel/mcount-dyn.S  |  52 +++++++++++++++++++++
 arch/riscv/kernel/mcount.S      |  22 +++++----
 6 files changed, 213 insertions(+), 12 deletions(-)
 create mode 100644 arch/riscv/kernel/mcount-dyn.S

diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 1f45a76f412e..a5c5c88700d4 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -118,6 +118,7 @@ config ARCH_RV64I
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FTRACE_MCOUNT_RECORD
+	select HAVE_DYNAMIC_FTRACE
 
 endchoice
 
diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
index 66d4175eb13e..acf0c7d001f3 100644
--- a/arch/riscv/include/asm/ftrace.h
+++ b/arch/riscv/include/asm/ftrace.h
@@ -8,3 +8,48 @@
 #if defined(CONFIG_FUNCTION_GRAPH_TRACER) && defined(CONFIG_FRAME_POINTER)
 #define HAVE_FUNCTION_GRAPH_FP_TEST
 #endif
+
+#ifndef __ASSEMBLY__
+void _mcount(void);
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+	return addr;
+}
+
+struct dyn_arch_ftrace {
+};
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * A general call in RISC-V is a pair of instructions:
+ * 1) auipc: adds the upper 20 bits of the PC-relative offset to pc
+ *           and writes the result to the ra register
+ * 2) jalr: adds the sign-extended lower 12 bits of the offset to ra,
+ *          jumps there, and sets ra to the return address
+ *          (original pc + 4)
+ *
+ * Dynamic ftrace generates probes at call sites, so we must deal with
+ * both auipc and jalr at the same time.
+ */
+
+#define MCOUNT_ADDR		((unsigned long)_mcount)
+#define JALR_SIGN_MASK		(0x00000800)
+#define JALR_OFFSET_MASK	(0x00000fff)
+#define AUIPC_OFFSET_MASK	(0xfffff000)
+#define AUIPC_PAD		(0x00001000)
+#define JALR_SHIFT		20
+#define JALR_BASIC		(0x000080e7)
+#define AUIPC_BASIC		(0x00000097)
+#define NOP4			(0x00000013)
+
+#define to_jalr_insn(_offset) \
+	(((_offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_BASIC)
+
+#define to_auipc_insn(_offset) ((_offset & JALR_SIGN_MASK) ? \
+	(((_offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_BASIC) : \
+	((_offset & AUIPC_OFFSET_MASK) | AUIPC_BASIC))
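+
+/*
+ * Worked example (offset value chosen for illustration): if
+ * offset = 0x1800, bit 11 of the jalr immediate (0x800) is set,
+ * so jalr sign-extends it to -0x800.  to_auipc_insn() compensates
+ * by adding AUIPC_PAD to the upper part:
+ *   (0x1000 + AUIPC_PAD) - 0x800 = 0x2000 - 0x800 = 0x1800.
+ */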
+
+/*
+ * Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here.
+ */
+#define MCOUNT_INSN_SIZE 8
+#endif
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 196f62ffc428..d7bdf888f1ca 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -34,7 +34,8 @@ CFLAGS_setup.o := -mcmodel=medany
 obj-$(CONFIG_SMP)		+= smpboot.o
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_MODULES)		+= module.o
-obj-$(CONFIG_FUNCTION_TRACER)	+= mcount.o
-obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o
+
+obj-$(CONFIG_FUNCTION_TRACER)	+= mcount.o ftrace.o
+obj-$(CONFIG_DYNAMIC_FTRACE)	+= mcount-dyn.o
 
 clean:
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index d0de68d144cb..311c287433c9 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -6,9 +6,107 @@
  */
 
 #include <linux/ftrace.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+static int ftrace_check_current_call(unsigned long hook_pos,
+				     unsigned int *expected)
+{
+	unsigned int replaced[2];
+	unsigned int nops[2] = {NOP4, NOP4};
+
+	/* we expect nops at the hook position */
+	if (!expected)
+		expected = nops;
+
+	/*
+	 * Read the text we want to modify;
+	 * return must be -EFAULT on read error
+	 */
+	if (probe_kernel_read(replaced, (void *)hook_pos, MCOUNT_INSN_SIZE))
+		return -EFAULT;
+
+	/*
+	 * Make sure it is what we expect it to be;
+	 * return must be -EINVAL on failed comparison
+	 */
+	if (memcmp(expected, replaced, sizeof(replaced))) {
+		pr_err("%p: expected (%08x %08x) but got (%08x %08x)\n",
+		       (void *)hook_pos, expected[0], expected[1], replaced[0],
+		       replaced[1]);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
+				bool enable)
+{
+	unsigned int offset = (unsigned int)(target - hook_pos);
+	unsigned int auipc_call = to_auipc_insn(offset);
+	unsigned int jalr_call = to_jalr_insn(offset);
+	unsigned int calls[2] = {auipc_call, jalr_call};
+	unsigned int nops[2] = {NOP4, NOP4};
+	int ret = 0;
+
+	/* when ftrace_make_nop is called */
+	if (!enable)
+		ret = ftrace_check_current_call(hook_pos, calls);
+
+	if (ret)
+		return ret;
+
+	/* replace the auipc-jalr pair at once */
+	ret = probe_kernel_write((void *)hook_pos, enable ? calls : nops,
+				 MCOUNT_INSN_SIZE);
+	/* return must be -EPERM on write error */
+	if (ret)
+		return -EPERM;
+
+	smp_mb();
+	flush_icache_range(hook_pos, hook_pos + MCOUNT_INSN_SIZE);
+
+	return 0;
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	int ret = ftrace_check_current_call(rec->ip, NULL);
+
+	if (ret)
+		return ret;
+
+	return __ftrace_modify_call(rec->ip, addr, true);
+}
+
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+		    unsigned long addr)
+{
+	return __ftrace_modify_call(rec->ip, addr, false);
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+	/* ftrace_call is the patchable call site inside ftrace_caller */
+	return __ftrace_modify_call((unsigned long)&ftrace_call,
+				    (unsigned long)func, true);
+}
+
+int __init ftrace_dyn_arch_init(void)
+{
+	return 0;
+}
+#endif
 
 /*
- * Most of this file is copied from arm64.
+ * Most of this function is copied from arm64.
  */
 void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 			   unsigned long frame_pointer)
diff --git a/arch/riscv/kernel/mcount-dyn.S b/arch/riscv/kernel/mcount-dyn.S
new file mode 100644
index 000000000000..57f80fe09cbd
--- /dev/null
+++ b/arch/riscv/kernel/mcount-dyn.S
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017 Andes Technology Corporation */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/asm.h>
+#include <asm/csr.h>
+#include <asm/unistd.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm-generic/export.h>
+#include <asm/ftrace.h>
+
+	.text
+
+	.macro SAVE_ABI_STATE
+	addi	sp, sp, -16
+	sd	s0, 0(sp)
+	sd	ra, 8(sp)
+	addi	s0, sp, 16
+	.endm
+
+	.macro RESTORE_ABI_STATE
+	ld	ra, 8(sp)
+	ld	s0, 0(sp)
+	addi	sp, sp, 16
+	.endm
+
+ENTRY(ftrace_caller)
+	/*
+	 * a0: the call-site address in the traced function, i.e. ra
+	 *     minus the size of the auipc+jalr pair
+	 * a1: the return address of the traced function's caller
+	 */
+	ld	a1, -8(s0)
+	addi	a0, ra, -MCOUNT_INSN_SIZE
+	SAVE_ABI_STATE
+ftrace_call:
+	.global ftrace_call
+	/*
+	 * For dynamic ftrace to work, we must reserve at least 8 bytes
+	 * here for a functional auipc-jalr pair. The nop pseudo
+	 * instruction may be assembled to different lengths on different
+	 * models, so we manually *expand* it into two 4-byte nops here.
+	 *
+	 * Calling ftrace_update_ftrace_func overwrites the nops below.
+	 * Check ftrace_modify_all_code for details.
+	 */
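+	/*
+	 * Once patched (illustration only; the encodings come from
+	 * to_auipc_insn/to_jalr_insn), the nops below become:
+	 *	auipc	ra, high20(func - .)
+	 *	jalr	ra, low12(func - .)(ra)
+	 * where func is the tracer entry passed to
+	 * ftrace_update_ftrace_func.
+	 */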
+	addi	x0, x0, 0
+	addi	x0, x0, 0
+	RESTORE_ABI_STATE
+	ret
+ENDPROC(ftrace_caller)
diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S
index c46a778627be..ce9bdc57a2a1 100644
--- a/arch/riscv/kernel/mcount.S
+++ b/arch/riscv/kernel/mcount.S
@@ -32,13 +32,13 @@
 	addi	s0, sp, 32
 	.endm
 
-	.macro STORE_ABI_STATE
+	.macro RESTORE_ABI_STATE
 	ld	ra, 8(sp)
 	ld	s0, 0(sp)
 	addi	sp, sp, 16
 	.endm
 
-	.macro STORE_RET_ABI_STATE
+	.macro RESTORE_RET_ABI_STATE
 	ld	ra, 24(sp)
 	ld	s0, 16(sp)
 	ld	a0, 8(sp)
@@ -46,6 +46,10 @@
 	.endm
 
 ENTRY(ftrace_stub)
+#ifdef CONFIG_DYNAMIC_FTRACE
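+       /*
+        * With dynamic ftrace, every _mcount call site is turned into
+        * nops at boot, so _mcount is only needed as a link-time
+        * symbol; alias it to ftrace_stub.
+        */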
+       .global _mcount
+       .set    _mcount, ftrace_stub
+#endif
 	ret
 ENDPROC(ftrace_stub)
 
@@ -66,15 +70,15 @@ ENTRY(return_to_handler)
 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
 	mv	a0, t6
 #endif
-	la	t0, ftrace_return_to_handler
-	jalr	t0
+	call	ftrace_return_to_handler
 	mv	a1, a0
-	STORE_RET_ABI_STATE
+	RESTORE_RET_ABI_STATE
 	jalr	a1
 ENDPROC(return_to_handler)
 EXPORT_SYMBOL(return_to_handler)
 #endif
 
+#ifndef CONFIG_DYNAMIC_FTRACE
 ENTRY(_mcount)
 	la	t4, ftrace_stub
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -104,9 +108,8 @@ do_ftrace_graph_caller:
 	ld	a2, -16(s0)
 #endif
 	SAVE_ABI_STATE
-	la	t0, prepare_ftrace_return
-	jalr	t0
-	STORE_ABI_STATE
+	call	prepare_ftrace_return
+	RESTORE_ABI_STATE
 	ret
 #endif
 
@@ -120,7 +123,8 @@ do_trace:
 
 	SAVE_ABI_STATE
 	jalr	t5
-	STORE_ABI_STATE
+	RESTORE_ABI_STATE
 	ret
 ENDPROC(_mcount)
 EXPORT_SYMBOL(_mcount)
+#endif
-- 
2.15.1
