Message-Id: <20180115064742.25324-4-alankao@andestech.com>
Date:   Mon, 15 Jan 2018 14:47:39 +0800
From:   Alan Kao <nonerkao@...il.com>
To:     Palmer Dabbelt <palmer@...ive.com>, Albert Ou <albert@...ive.com>,
        Christoph Hellwig <hch@....de>,
        Steven Rostedt <rostedt@...dmis.org>,
        Ingo Molnar <mingo@...hat.com>,
        Masahiro Yamada <yamada.masahiro@...ionext.com>,
        Kamil Rytarowski <n54@....com>,
        Andrew Morton <akpm@...ux-foundation.org>,
        patches@...ups.riscv.org, linux-kernel@...r.kernel.org
Cc:     Alan Kao <alankao@...estech.com>,
        Greentime Hu <greentime@...estech.com>
Subject: [PATCH v2 3/6] riscv/ftrace: Add dynamic function graph tracer support

Once the function_graph tracer is enabled, a filtered function has the
following call sequence:

* ftrace_caller          ==> on/off by ftrace_make_call/ftrace_make_nop
* ftrace_graph_caller
* ftrace_graph_call      ==> on/off by ftrace_enable/disable_ftrace_graph_caller
* prepare_ftrace_return

With the upcoming DYNAMIC_FTRACE_WITH_REGS support in mind, it is more
extensible to have a separate ftrace_graph_caller function than to call
prepare_ftrace_return directly from ftrace_caller.
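
To illustrate the toggling described above, here is a small user-space
sketch (not part of this patch; every *_sim name is invented for the
example). A plain function pointer stands in for the ftrace_graph_call
instruction site, which the real code flips between two NOPs and a call
by patching the text at runtime:

#include <stdio.h>

static void prepare_ftrace_return_sim(unsigned long *parent,
				      unsigned long self_addr)
{
	/* The real function hooks *parent; here we only report the call. */
	printf("prepare_ftrace_return(parent=%p, self=%#lx)\n",
	       (void *)parent, self_addr);
}

/* Stands in for the ftrace_graph_call site: NULL models the two NOPs. */
static void (*ftrace_graph_call_sim)(unsigned long *, unsigned long);

static void ftrace_graph_caller_sim(unsigned long *parent,
				    unsigned long self_addr)
{
	if (ftrace_graph_call_sim)	/* site patched to a call? */
		ftrace_graph_call_sim(parent, self_addr);
}

static void ftrace_caller_sim(unsigned long *parent, unsigned long self_addr)
{
	/* the function tracer callback (ftrace_call site) would run here */
	ftrace_graph_caller_sim(parent, self_addr);
}

int main(void)
{
	unsigned long fake_ra = 0x1234;

	/* models ftrace_enable_ftrace_graph_caller(): turn the site on */
	ftrace_graph_call_sim = prepare_ftrace_return_sim;
	ftrace_caller_sim(&fake_ra, 0xabcd);

	/* models ftrace_disable_ftrace_graph_caller(): back to NOPs */
	ftrace_graph_call_sim = NULL;
	ftrace_caller_sim(&fake_ra, 0xabcd);
	return 0;
}

In the actual patch the same on/off decision is made by rewriting the two
NOPs at ftrace_graph_call into a call to prepare_ftrace_return, rather
than by testing a pointer at run time.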

Cc: Greentime Hu <greentime@...estech.com>
Signed-off-by: Alan Kao <alankao@...estech.com>
---
 arch/riscv/kernel/ftrace.c     | 25 +++++++++++++++-
 arch/riscv/kernel/mcount-dyn.S | 65 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 89 insertions(+), 1 deletion(-)

diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index 311c287433c9..dce1286af9b0 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -51,7 +51,7 @@ static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
 	unsigned int nops[2] = {NOP4, NOP4};
 	int ret = 0;
 
-	/* when ftrace_make_nop is called */
+	/* for ftrace_make_nop and ftrace_disable_ftrace_graph_caller */
 	if (!enable)
 		ret = ftrace_check_current_call(hook_pos, calls);
 
@@ -105,6 +105,7 @@ int __init ftrace_dyn_arch_init(void)
 }
 #endif
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /*
  * Most of this function is copied from arm64.
  */
@@ -137,3 +138,25 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 		return;
 	*parent = return_hooker;
 }
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	int ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
+					    NULL);
+
+	if (ret)
+		return ret;
+
+	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
+				    (unsigned long)&prepare_ftrace_return, true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
+				    (unsigned long)&prepare_ftrace_return, false);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/riscv/kernel/mcount-dyn.S b/arch/riscv/kernel/mcount-dyn.S
index 57f80fe09cbd..64e715d4e180 100644
--- a/arch/riscv/kernel/mcount-dyn.S
+++ b/arch/riscv/kernel/mcount-dyn.S
@@ -14,18 +14,63 @@
 	.text
 
 	.macro SAVE_ABI_STATE
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	addi    sp, sp, -48
+	sd      s0, 32(sp)
+	sd      ra, 40(sp)
+	addi    s0, sp, 48
+	sd      t0, 24(sp)
+	sd      t1, 16(sp)
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+	sd      t2, 8(sp)
+#endif
+#else
 	addi	sp, sp, -16
 	sd	s0, 0(sp)
 	sd	ra, 8(sp)
 	addi	s0, sp, 16
+#endif
 	.endm
 
 	.macro RESTORE_ABI_STATE
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	ld	s0, 32(sp)
+	ld	ra, 40(sp)
+	addi	sp, sp, 48
+#else
 	ld	ra, 8(sp)
 	ld	s0, 0(sp)
 	addi	sp, sp, 16
+#endif
 	.endm
 
+	.macro RESTORE_GRAPH_ARGS
+	ld	a0, 24(sp)
+	ld	a1, 16(sp)
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+	ld	a2, 8(sp)
+#endif
+	.endm
+
+ENTRY(ftrace_graph_caller)
+	addi	sp, sp, -16
+	sd	s0, 0(sp)
+	sd	ra, 8(sp)
+	addi	s0, sp, 16
+ftrace_graph_call:
+	.global ftrace_graph_call
+	/*
+	 * Calling ftrace_enable/disable_ftrace_graph_caller would overwrite the
+	 * nops below.  Check ftrace_modify_all_code for details.
+	 */
+	addi	x0, x0, 0
+	addi	x0, x0, 0
+	ld	ra, 8(sp)
+	ld	s0, 0(sp)
+	addi	sp, sp, 16
+	ret
+ENDPROC(ftrace_graph_caller)
+
 ENTRY(ftrace_caller)
 	/*
 	 * a0: the address in the caller when calling ftrace_caller
@@ -33,6 +78,20 @@ ENTRY(ftrace_caller)
 	 */
 	ld	a1, -8(s0)
 	addi	a0, ra, -MCOUNT_INSN_SIZE
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/*
+	 * the graph tracer (specifically, prepare_ftrace_return) needs these
+	 * arguments but for now the function tracer occupies the regs, so we
+	 * save them in temporary regs to recover later.
+	 */
+	addi	t0, s0, -8
+	mv	t1, a0
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+	ld	t2, -16(s0)
+#endif
+#endif
+
 	SAVE_ABI_STATE
 ftrace_call:
 	.global ftrace_call
@@ -47,6 +106,12 @@ ftrace_call:
 	 */
 	addi	x0, x0, 0
 	addi	x0, x0, 0
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	RESTORE_GRAPH_ARGS
+	call	ftrace_graph_caller
+#endif
+
 	RESTORE_ABI_STATE
 	ret
 ENDPROC(ftrace_caller)
-- 
2.15.1
