Message-Id: <1510471708-12729-4-git-send-email-yupeng0921@gmail.com>
Date:   Sun, 12 Nov 2017 07:28:27 +0000
From:   yupeng0921@...il.com
To:     linux-kernel@...r.kernel.org
Cc:     ast@...nel.org, daniel@...earbox.net, rostedt@...dmis.org,
        mingo@...hat.com, yupeng0921@...il.com
Subject: [ftrace-bpf 4/5] enable bpf filter for graph trace in x86-64 arch

Define FTRACE_BPF_FILTER when CONFIG_FTRACE_BPF_FILTER is enabled, and
introduce struct ftrace_regs. struct ftrace_regs plays the same role as
pt_regs does for kprobes, but ftrace does not save the full context,
only the caller-saved registers, so struct ftrace_regs stores just
those registers.
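
As an illustration for reviewers (not part of the patch): because the
x86-64 SysV ABI passes the traced function's first two integer
arguments in %rdi and %rsi, a filter handed a struct ftrace_regs
pointer can inspect those arguments directly. The function below is a
hypothetical sketch; only the register layout comes from this series
(struct ftrace_regs is added to ftrace.h below):

	/* Hypothetical sketch, not in this series: filter on the
	 * traced function's first argument (%rdi per the x86-64 ABI).
	 */
	static int example_filter(struct ftrace_regs *ctx)
	{
		if (ctx->rdi == 0)
			return 0;	/* reject: do not trace this call */
		return 1;		/* accept: trace this call */
	}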

Signed-off-by: yupeng0921@...il.com
---
 arch/x86/include/asm/ftrace.h | 22 ++++++++++++++++++++++
 arch/x86/kernel/ftrace.c      | 15 +++++++++++++++
 arch/x86/kernel/ftrace_64.S   |  1 +
 3 files changed, 38 insertions(+)

diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 09ad885..9a5bffc 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -11,6 +11,28 @@
 #endif
 #define MCOUNT_INSN_SIZE	5 /* sizeof mcount call */
 
+#ifndef __i386__
+#ifdef CONFIG_FTRACE_BPF_FILTER
+#define FTRACE_BPF_FILTER
+#ifndef __ASSEMBLY__
+/*
+ * The order is exactly the same as in
+ * arch/x86/entry/calling.h
+ */
+struct ftrace_regs {
+	unsigned long r9;
+	unsigned long r8;
+	unsigned long rax;
+	unsigned long rcx;
+	unsigned long rdx;
+	unsigned long rsi;
+	unsigned long rdi;
+};
+#endif	/* !__ASSEMBLY__ */
+#endif	/* CONFIG_FTRACE_BPF_FILTER */
+
+#endif	/* !__i386__ */
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 #define ARCH_SUPPORTS_FTRACE_OPS 1
 #endif
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 01ebcb6..d190534 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -896,8 +896,14 @@ static void *addr_from_call(void *ptr)
 	return ptr + MCOUNT_INSN_SIZE + calc.offset;
 }
 
+#ifdef FTRACE_BPF_FILTER
+void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
+			   unsigned long frame_pointer,
+			   struct ftrace_regs *ctx);
+#else
 void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
 			   unsigned long frame_pointer);
+#endif
 
 /*
  * If the ops->trampoline was not allocated, then it probably
@@ -989,8 +995,14 @@ int ftrace_disable_ftrace_graph_caller(void)
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
  */
+#ifdef FTRACE_BPF_FILTER
+void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
+			   unsigned long frame_pointer,
+			   struct ftrace_regs *ctx)
+#else
 void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
 			   unsigned long frame_pointer)
+#endif
 {
 	unsigned long old;
 	int faulted;
@@ -1048,6 +1060,9 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
 
 	trace.func = self_addr;
 	trace.depth = current->curr_ret_stack + 1;
+#ifdef FTRACE_BPF_FILTER
+	trace.ctx = ctx;	/* make the saved regs visible to the filter */
+#endif
 
 	/* Only trace if the calling function expects to */
 	if (!ftrace_graph_entry(&trace)) {
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index c832291..5e51b93 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -108,6 +108,7 @@ EXPORT_SYMBOL(mcount)
 	movq MCOUNT_REG_SIZE-8(%rsp), %rdx
 	movq %rdx, RBP(%rsp)
 
+	leaq R9(%rsp), %rcx	/* &saved regs (struct ftrace_regs) as fourth parameter */
 	/* Copy the parent address into %rsi (second parameter) */
 #ifdef CC_USING_FENTRY
 	movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi
-- 
2.7.4
