Message-Id: <1197926823.23402.30.camel@brick>
Date:	Mon, 17 Dec 2007 13:27:03 -0800
From:	Harvey Harrison <harvey.harrison@...il.com>
To:	Ingo Molnar <mingo@...e.hu>
Cc:	Ananth N Mavinakayanahalli <ananth@...ibm.com>,
	Jim Keniston <jkenisto@...ibm.com>,
	Roland McGrath <roland@...hat.com>,
	Arjan van de Ven <arjan@...radead.org>,
	prasanna@...ibm.com, anil.s.keshavamurthy@...el.com,
	davem@...emloft.net, systemtap-ml <systemtap@...rces.redhat.com>,
	LKML <linux-kernel@...r.kernel.org>,
	Andrew Morton <akpm@...ux-foundation.org>
Subject: [PATCH 3/4] x86: add kprobe-booster to X86_64

Add the kprobe-booster to X86_64, based on the X86_32 implementation; this is mostly a matter of un-ifdeffing the existing code so both arches share the boost logic.

Based on a patch from Masami Hiramatsu <mhiramat@...hat.com>.

Signed-off-by: Harvey Harrison <harvey.harrison@...il.com>
---
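Note for reviewers (not intended for the changelog): the booster places a
relative jump right after the copied instruction in the out-of-line buffer,
so the boosted path executes the copy and jumps straight back into the
original instruction stream instead of taking the single-step trap. Below
is a minimal userspace sketch of the encoding that set_jmp_op() performs;
emit_jmp() and the addresses are made-up names for illustration, not
kernel API:

	#include <stdint.h>
	#include <string.h>

	#define RELATIVEJUMP_INSTRUCTION 0xe9	/* opcode of "jmp rel32" */

	/* Write a 5-byte "jmp rel32" at 'from' that lands on 'to'. */
	static void emit_jmp(void *from, void *to)
	{
		uint8_t *p = from;
		/* rel32 counts from the address *after* the 5-byte jmp */
		int32_t rel = (int32_t)((long)to - ((long)from + 5));

		memcpy(p + 1, &rel, sizeof(rel));
		p[0] = RELATIVEJUMP_INSTRUCTION;
	}

For example, a jmp emitted at 0x1000 targeting 0x1234 ends up with
rel = 0x1234 - (0x1000 + 5) = 0x22f.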
 arch/x86/kernel/kprobes.c |   59 +++++++++++++++++++++++++++++++----------------------------
 include/asm-x86/kprobes.h |   12 +++++----
 2 files changed, 38 insertions(+), 33 deletions(-)
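One more note on the 64-bit-only part that this patch keeps: is_riprel()
and the displacement fixup in arch_copy_kprobe() exist because an X86_64
instruction may use RIP-relative addressing, where the operand address is
computed from the address of the *next* instruction -- and that address
changes once the copy runs from the out-of-line buffer. A sketch of the
rebase arithmetic, with rebase_rip_disp() and its arguments as purely
illustrative names:

	#include <stdint.h>
	#include <assert.h>

	/*
	 * An instruction at 'orig' addressing (next_insn + disp) was
	 * copied to 'copy'; shift disp by (orig - copy) so the operand
	 * still resolves to the original target.
	 */
	static int32_t rebase_rip_disp(int32_t disp, uint8_t *orig,
				       uint8_t *copy)
	{
		int64_t newdisp = (int64_t)disp + (orig - copy);

		/* target must stay reachable with a 32-bit displacement */
		assert((int64_t)(int32_t)newdisp == newdisp);
		return (int32_t)newdisp;
	}

This mirrors the BUG_ON((s64) (s32) disp != disp) sanity check in the
hunk below.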

diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 64c702c..47bae2c 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -151,15 +151,18 @@ twobyte_has_modrm[256 / (sizeof(unsigned long) * 8)] = {
 #undef R4
 #undef RF
 
-/* insert a jmp code */
+/*
+ * Insert a jump instruction at address 'from' which jumps to address 'to'.
+ */
 static inline void set_jmp_op(void *from, void *to)
 {
 	struct __arch_jmp_op {
 		char op;
-		long raddr;
-	} __attribute__((packed)) *jop;
+		s32 raddr;
+	} __attribute__((packed)) *jop;
 	jop = (struct __arch_jmp_op *)from;
-	jop->raddr = (long)(to) - ((long)(from) + 5);
+
+	jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
 	jop->op = RELATIVEJUMP_INSTRUCTION;
 }
 
@@ -183,6 +186,10 @@ retry:
 	}
 
 	switch (opcode & 0xf0) {
+#ifdef CONFIG_X86_64
+	case 0x40:
+		goto retry; /* REX prefix is boostable */
+#endif
 	case 0x60:
 		if (0x63 < opcode && opcode < 0x67)
 			goto retry; /* prefixes */
@@ -202,7 +209,7 @@ retry:
 	case 0xf0:
 		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
 			goto retry; /* lock/rep(ne) prefix */
-		/* clear and set flags can be boost */
+		/* clear and set flags are boostable */
 		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
 	default:
 		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
@@ -221,6 +228,10 @@ static s32 __kprobes *is_riprel(u8 *insn)
 {
 	int need_modrm;
 
+#ifdef CONFIG_X86_32
+	return NULL;
+#endif
+
 	/* Skip legacy instruction prefixes.  */
 	while (1) {
 		switch (*insn) {
@@ -266,18 +277,10 @@ static s32 __kprobes *is_riprel(u8 *insn)
 
 static void __kprobes arch_copy_kprobe(struct kprobe *p)
 {
-#ifdef CONFIG_X86_32
-	memcpy(p->ainsn.insn, p->addr,
-	       (MAX_INSN_SIZE + 1) * sizeof(kprobe_opcode_t));
-	p->opcode = *p->addr;
-	if (can_boost(p->addr)) {
-		p->ainsn.boostable = 0;
-	} else {
-		p->ainsn.boostable = -1;
-	}
-#else
 	s32 *ripdisp;
-	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
+	memcpy(p->ainsn.insn, p->addr,
+	       MAX_INSN_SIZE + sizeof(kprobe_opcode_t));
+
 	ripdisp = is_riprel(p->ainsn.insn);
 	if (ripdisp) {
 		/*
@@ -297,8 +300,13 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
 		BUG_ON((s64) (s32) disp != disp); /* Sanity check.  */
 		*ripdisp = disp;
 	}
+
 	p->opcode = *p->addr;
-#endif
+	if (can_boost(p->addr)) {
+		p->ainsn.boostable = 0;
+	} else {
+		p->ainsn.boostable = -1;
+	}
 }
 
 /*
@@ -343,11 +351,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
 	mutex_lock(&kprobe_mutex);
-#ifdef CONFIG_X86_32
 	free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
-#else
-	free_insn_slot(p->ainsn.insn, 0);
-#endif
 	mutex_unlock(&kprobe_mutex);
 }
 
@@ -544,7 +548,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 		return 1;
 
 ss_probe:
-#if defined(CONFIG_X86_32) && (!defined(CONFIG_PREEMPT) || defined(CONFIG_PM))
+#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
 	if (p->ainsn.boostable == 1 && !p->post_handler){
 		/* Boost up -- we can execute copied instructions directly */
 		reset_current_kprobe();
@@ -722,6 +726,11 @@ void *__kprobes trampoline_handler(struct pt_regs *regs)
  * that is atop the stack is the address following the copied instruction.
  * We need to make it the address following the original instruction.
  *
+ * If this is the first time we've single-stepped the instruction at
+ * this probepoint, and the instruction is boostable, boost it: add a
+ * jump instruction after the copied instruction, that jumps to the next
+ * instruction after the probepoint.
+ *
  * This function also checks instruction size for preparing direct execution.
  */
 static void __kprobes resume_execution(struct kprobe *p,
@@ -754,10 +763,8 @@ static void __kprobes resume_execution(struct kprobe *p,
 	case 0xcb:
 	case 0xcf:
 	case 0xea:		/* jmp absolute -- ip is correct */
-#ifdef CONFIG_X86_32
 		/* ip is already adjusted, no more changes required */
 		p->ainsn.boostable = 1;
-#endif
 		goto no_change;
 	case 0xe8:		/* call relative - Fix return addr */
 		*tos = orig_ip + (*tos - copy_ip);
@@ -777,10 +784,8 @@ static void __kprobes resume_execution(struct kprobe *p,
 		} else if (((insn[1] & 0x31) == 0x20) ||	/* jmp near, absolute indirect */
 			   ((insn[1] & 0x31) == 0x21)) {	/* jmp far, absolute indirect */
 			/* ip is correct. */
-#ifdef CONFIG_X86_32
 			/* And this is boostable */
 			p->ainsn.boostable = 1;
-#endif
 			goto no_change;
 		}
 		break;
@@ -788,7 +793,6 @@ static void __kprobes resume_execution(struct kprobe *p,
 		break;
 	}
 
-#ifdef CONFIG_X86_32
 	if (p->ainsn.boostable == 0) {
 		if ((regs->ip > copy_ip) &&
 		    (regs->ip - copy_ip) + 5 < (MAX_INSN_SIZE + 1)) {
@@ -803,7 +807,6 @@ static void __kprobes resume_execution(struct kprobe *p,
 			p->ainsn.boostable = -1;
 		}
 	}
-#endif
 	regs->ip = orig_ip + (regs->ip - copy_ip);
 
 no_change:
diff --git a/include/asm-x86/kprobes.h b/include/asm-x86/kprobes.h
index 7319c62..f9a4fd2 100644
--- a/include/asm-x86/kprobes.h
+++ b/include/asm-x86/kprobes.h
@@ -58,13 +58,15 @@ void kretprobe_trampoline(void);
 struct arch_specific_insn {
 	/* copy of the original instruction */
 	kprobe_opcode_t *insn;
-#ifdef CONFIG_X86_32
 	/*
-	 * If this flag is not 0, this kprobe can be boost when its
-	 * post_handler and break_handler is not set.
+	 * boostable = -1: This instruction type is not boostable.
+	 * boostable = 0: This instruction type is boostable.
+	 * boostable = 1: This instruction has been boosted: we have
+	 * added a relative jump after the instruction copy in insn,
+ * so no single-stepping or fixup is needed (unless there's
+	 * a post_handler or break_handler).
 	 */
-	int boostable;
-#endif
+	int boostable;
 };
 
 struct prev_kprobe {
-- 
1.5.4.rc0.1083.gf568

