Message-ID: <20191009132844.GG2359@hirez.programming.kicks-ass.net>
Date: Wed, 9 Oct 2019 15:28:44 +0200
From: Peter Zijlstra <peterz@...radead.org>
To: Masami Hiramatsu <mhiramat@...nel.org>
Cc: linux-kernel@...r.kernel.org, x86@...nel.org,
Nadav Amit <nadav.amit@...il.com>,
Andy Lutomirski <luto@...nel.org>,
Dave Hansen <dave.hansen@...ux.intel.com>,
Song Liu <songliubraving@...com>,
Steven Rostedt <rostedt@...dmis.org>,
Daniel Bristot de Oliveira <bristot@...hat.com>,
paulmck@...nel.org
Subject: Re: x86/kprobes bug? (was: [PATCH 1/3] x86/alternatives: Teach
text_poke_bp() to emulate instructions)
On Wed, Oct 09, 2019 at 03:26:39PM +0200, Peter Zijlstra wrote:
> So I suppose I'm suggesting we do something like the below on top of
> what I already have here:
>
> git://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git x86/ftrace
>
> All it needs are a few comments ;-) Also note how this nicely gets rid
> of the one text_poke_bp(.emulate) user, so we can go and remove that as
> well.
>
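
(Sketch only, not part of the patch: roughly what the unoptimize path in the opt.c hunk below boils down to once the new helper exists; the function name here is made up for illustration.)

static void sketch_unoptimize(struct optimized_kprobe *op)
{
        u8 int3 = INT3_INSN_OPCODE;

        /* Put the INT3 back and make every CPU serialize first... */
        text_poke(op->kp.addr, &int3, INT3_INSN_SIZE);
        text_poke_sync();               /* on_each_cpu(do_sync_core, NULL, 1) */

        /* ...only then restore the bytes the JMP32 displacement clobbered. */
        text_poke(op->kp.addr + INT3_INSN_SIZE,
                  op->optinsn.copied_insn, DISP32_SIZE);
        text_poke_sync();
}
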
> ---
> arch/x86/include/asm/text-patching.h | 1 +
> arch/x86/kernel/alternative.c | 11 ++++++++---
> arch/x86/kernel/kprobes/core.c | 1 +
> arch/x86/kernel/kprobes/opt.c | 12 ++++--------
> 4 files changed, 14 insertions(+), 11 deletions(-)
>
> diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
> index d553175212b3..d3269558e5b5 100644
> --- a/arch/x86/include/asm/text-patching.h
> +++ b/arch/x86/include/asm/text-patching.h
> @@ -42,6 +42,7 @@ extern void text_poke_early(void *addr, const void *opcode, size_t len);
> * an inconsistent instruction while you patch.
> */
> extern void *text_poke(void *addr, const void *opcode, size_t len);
> +extern void text_poke_sync(void);
> extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
> extern int poke_int3_handler(struct pt_regs *regs);
> extern void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate);
> diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
> index 34a08bc68e9a..9e81ab542190 100644
> --- a/arch/x86/kernel/alternative.c
> +++ b/arch/x86/kernel/alternative.c
> @@ -936,6 +936,11 @@ static void do_sync_core(void *info)
> sync_core();
> }
>
> +void text_poke_sync(void)
> +{
> + on_each_cpu(do_sync_core, NULL, 1);
> +}
> +
> struct text_poke_loc {
> s32 rel_addr; /* addr := _stext + rel_addr */
> s32 rel32;
> @@ -1089,7 +1094,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
> for (i = 0; i < nr_entries; i++)
> text_poke(text_poke_addr(&tp[i]), &int3, sizeof(int3));
>
> - on_each_cpu(do_sync_core, NULL, 1);
> + text_poke_sync();
>
> /*
> * Second step: update all but the first byte of the patched range.
> @@ -1111,7 +1116,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
> * not necessary and we'd be safe even without it. But
> * better safe than sorry (plus there's not only Intel).
> */
> - on_each_cpu(do_sync_core, NULL, 1);
> + text_poke_sync();
> }
>
> /*
> @@ -1127,7 +1132,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
> }
>
> if (do_sync)
> - on_each_cpu(do_sync_core, NULL, 1);
> + text_poke_sync();
>
> /*
> * sync_core() implies an smp_mb() and orders this store against
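
(Aside: the three sync points in text_poke_bp_batch() above, sketched in order; the steps themselves are unchanged, the hunks only swap the sync calls.)

/*
 * 1) poke an INT3 over the first byte of every patch site
 *        text_poke_sync();
 * 2) poke all but the first byte of every site
 *        text_poke_sync();            (if anything was written)
 * 3) poke the first byte of the replacement opcode back in
 *        if (do_sync)
 *                text_poke_sync();
 */
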
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 7e4a8a1c9d9a..04858ea7cd76 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -498,11 +498,13 @@ int arch_prepare_kprobe(struct kprobe *p)
void arch_arm_kprobe(struct kprobe *p)
{
text_poke(p->addr, ((unsigned char []){INT3_INSN_OPCODE}), 1);
+ text_poke_sync();
}

void arch_disarm_kprobe(struct kprobe *p)
{
text_poke(p->addr, &p->opcode, 1);
+ text_poke_sync();
}

void arch_remove_kprobe(struct kprobe *p)
> void arch_remove_kprobe(struct kprobe *p)
> diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
> index 36d7249f2145..30a2646cfc8a 100644
> --- a/arch/x86/kernel/kprobes/opt.c
> +++ b/arch/x86/kernel/kprobes/opt.c
> @@ -446,14 +446,10 @@ void arch_optimize_kprobes(struct list_head *oplist)
> /* Replace a relative jump with a breakpoint (int3). */
> void arch_unoptimize_kprobe(struct optimized_kprobe *op)
> {
> - u8 insn_buff[JMP32_INSN_SIZE];
> -
> - /* Set int3 to first byte for kprobes */
> - insn_buff[0] = INT3_INSN_OPCODE;
> - memcpy(insn_buff + 1, op->optinsn.copied_insn, DISP32_SIZE);
> -
> - text_poke_bp(op->kp.addr, insn_buff, JMP32_INSN_SIZE,
> - text_gen_insn(JMP32_INSN_OPCODE, op->kp.addr, op->optinsn.insn));
> + arch_arm_kprobe(&op->kp);
> + text_poke(op->kp.addr + INT3_INSN_SIZE,
> + op->optinsn.copied_insn, DISP32_SIZE);
> + text_poke_sync();
> }
>
> /*
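
For reference, the byte layout the opt.c hunk above is juggling (sketch, using the constants from the diff; the raw opcode values are the usual x86 ones):

/*
 *   optimized site:    0xE9 <rel32>           JMP32_INSN_SIZE (5) bytes
 *   unoptimized site:  0xCC <4 saved bytes>   INT3_INSN_SIZE (1) + DISP32_SIZE (4)
 *
 * op->optinsn.copied_insn holds the DISP32_SIZE original bytes that the
 * JMP32 displacement overwrote; those are what get poked back in at
 * op->kp.addr + INT3_INSN_SIZE once the INT3 is already globally visible.
 */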