lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Date:	Tue, 16 Jun 2015 11:09:59 -0400
From:	William Cohen <wcohen@...hat.com>
To:	David Long <dave.long@...aro.org>,
	Catalin Marinas <catalin.marinas@....com>,
	Will Deacon <will.deacon@....com>,
	linux-arm-kernel@...ts.infradead.org,
	Russell King <linux@....linux.org.uk>
CC:	sandeepa.s.prabhu@...il.com,
	Steve Capper <steve.capper@...aro.org>,
	"Jon Medhurst (Tixy)" <tixy@...aro.org>,
	Masami Hiramatsu <masami.hiramatsu.pt@...achi.com>,
	Ananth N Mavinakayanahalli <ananth@...ibm.com>,
	Anil S Keshavamurthy <anil.s.keshavamurthy@...el.com>,
	davem@...emloft.net, Mark Brown <broonie@...nel.org>,
	linux-kernel@...r.kernel.org
Subject: Re: [PATCH v7 5/7] arm64: Add trampoline code for kretprobes

On 06/15/2015 03:07 PM, David Long wrote:
> From: William Cohen <wcohen@...hat.com>
> 
> The trampoline code is used by kretprobes to capture a return from a probed
> function.  This is done by saving the registers, calling the handler, and
> restoring the registers.  The code then returns to the roginal saved caller

s/roginal/original

> return address.  It is necessary to do this directly instead of using a
> software breakpoint because the code used in processing that breakpoint
> could itself be kprobe'd and cause a problematic reentry into the debug
> exception handler.
> 
> Signed-off-by: William Cohen <wcohen@...hat.com>
> Signed-off-by: David A. Long <dave.long@...aro.org>
> ---
>  arch/arm64/include/asm/kprobes.h  |  1 +
>  arch/arm64/kernel/kprobes-arm64.h | 41 +++++++++++++++++++++++++++++++++++++++
>  arch/arm64/kernel/kprobes.c       | 26 +++++++++++++++++++++++++
>  3 files changed, 68 insertions(+)
> 
> diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h
> index af31c4d..d081f49 100644
> --- a/arch/arm64/include/asm/kprobes.h
> +++ b/arch/arm64/include/asm/kprobes.h
> @@ -58,5 +58,6 @@ int kprobe_exceptions_notify(struct notifier_block *self,
>  			     unsigned long val, void *data);
>  int kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr);
>  int kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr);
> +void kretprobe_trampoline(void);
>  
>  #endif /* _ARM_KPROBES_H */
> diff --git a/arch/arm64/kernel/kprobes-arm64.h b/arch/arm64/kernel/kprobes-arm64.h
> index ff8a55f..bdcfa62 100644
> --- a/arch/arm64/kernel/kprobes-arm64.h
> +++ b/arch/arm64/kernel/kprobes-arm64.h
> @@ -27,4 +27,45 @@ extern kprobes_pstate_check_t * const kprobe_condition_checks[16];
>  enum kprobe_insn __kprobes
>  arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi);
>  
> +#define SAVE_REGS_STRING\
> +	"	stp x0, x1, [sp, #16 * 0]\n"	\
> +	"	stp x2, x3, [sp, #16 * 1]\n"	\
> +	"	stp x4, x5, [sp, #16 * 2]\n"	\
> +	"	stp x6, x7, [sp, #16 * 3]\n"	\
> +	"	stp x8, x9, [sp, #16 * 4]\n"	\
> +	"	stp x10, x11, [sp, #16 * 5]\n"	\
> +	"	stp x12, x13, [sp, #16 * 6]\n"	\
> +	"	stp x14, x15, [sp, #16 * 7]\n"	\
> +	"	stp x16, x17, [sp, #16 * 8]\n"	\
> +	"	stp x18, x19, [sp, #16 * 9]\n"	\
> +	"	stp x20, x21, [sp, #16 * 10]\n"	\
> +	"	stp x22, x23, [sp, #16 * 11]\n"	\
> +	"	stp x24, x25, [sp, #16 * 12]\n"	\
> +	"	stp x26, x27, [sp, #16 * 13]\n"	\
> +	"	stp x28, x29, [sp, #16 * 14]\n"	\
> +	"	str x30,   [sp, #16 * 15]\n"    \
> +	"	mrs x0, nzcv\n"			\
> +	"	str x0, [sp, #8 * 33]\n"
> +
> +
> +#define RESTORE_REGS_STRING\
> +	"	ldr x0, [sp, #8 * 33]\n"	\
> +	"	msr nzcv, x0\n"			\
> +	"	ldp x0, x1, [sp, #16 * 0]\n"	\
> +	"	ldp x2, x3, [sp, #16 * 1]\n"	\
> +	"	ldp x4, x5, [sp, #16 * 2]\n"	\
> +	"	ldp x6, x7, [sp, #16 * 3]\n"	\
> +	"	ldp x8, x9, [sp, #16 * 4]\n"	\
> +	"	ldp x10, x11, [sp, #16 * 5]\n"	\
> +	"	ldp x12, x13, [sp, #16 * 6]\n"	\
> +	"	ldp x14, x15, [sp, #16 * 7]\n"	\
> +	"	ldp x16, x17, [sp, #16 * 8]\n"	\
> +	"	ldp x18, x19, [sp, #16 * 9]\n"	\
> +	"	ldp x20, x21, [sp, #16 * 10]\n"	\
> +	"	ldp x22, x23, [sp, #16 * 11]\n"	\
> +	"	ldp x24, x25, [sp, #16 * 12]\n"	\
> +	"	ldp x26, x27, [sp, #16 * 13]\n"	\
> +	"	ldp x28, x29, [sp, #16 * 14]\n"	\
> +	"	ldr x30,   [sp, #16 * 15]\n"
> +
>  #endif /* _ARM_KERNEL_KPROBES_ARM64_H */
> diff --git a/arch/arm64/kernel/kprobes.c b/arch/arm64/kernel/kprobes.c
> index 6255814..570218c 100644
> --- a/arch/arm64/kernel/kprobes.c
> +++ b/arch/arm64/kernel/kprobes.c
> @@ -560,6 +560,32 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
>  	return 0;
>  }
>  
> +/*
> + * When a retprobed function returns, this code saves registers and
> + * calls trampoline_probe_handler(), which calls the kretprobe's handler.
> + */
> +static void __used __kprobes kretprobe_trampoline_holder(void)
> +{
> +	asm volatile (".global kretprobe_trampoline\n"
> +			"kretprobe_trampoline:\n"
> +			"sub sp, sp, %0\n"
> +			SAVE_REGS_STRING
> +			"mov x0, sp\n"
> +			"bl trampoline_probe_handler\n"
> +			/* Replace trampoline address in lr with actual
> +			   orig_ret_addr return address. */
> +			"str x0, [sp, #16 * 15]\n"
> +			RESTORE_REGS_STRING
> +			"add sp, sp, %0\n"
> +			"ret\n"
> +		      : : "I"(sizeof(struct pt_regs)) : "memory");
> +}
> +
> +static void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
> +{
> +	return (void *) 0;
> +}
> +
>  int __init arch_init_kprobes(void)
>  {
>  	return 0;
> 

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@...r.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ