[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20250714173915.b9edd474742de46bcbe9c617@kernel.org>
Date: Mon, 14 Jul 2025 17:39:15 +0900
From: Masami Hiramatsu (Google) <mhiramat@...nel.org>
To: Jiri Olsa <jolsa@...nel.org>
Cc: Oleg Nesterov <oleg@...hat.com>, Peter Zijlstra <peterz@...radead.org>,
Andrii Nakryiko <andrii@...nel.org>, bpf@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-trace-kernel@...r.kernel.org,
x86@...nel.org, Song Liu <songliubraving@...com>, Yonghong Song
<yhs@...com>, John Fastabend <john.fastabend@...il.com>, Hao Luo
<haoluo@...gle.com>, Steven Rostedt <rostedt@...dmis.org>, Masami Hiramatsu
<mhiramat@...nel.org>, Alan Maguire <alan.maguire@...cle.com>, David Laight
<David.Laight@...LAB.COM>, Thomas Weißschuh
<thomas@...ch.de>, Ingo Molnar <mingo@...nel.org>
Subject: Re: [PATCHv5 perf/core 09/22] uprobes/x86: Add uprobe syscall to
speed up uprobe
On Fri, 11 Jul 2025 10:29:17 +0200
Jiri Olsa <jolsa@...nel.org> wrote:
> Adding new uprobe syscall that calls uprobe handlers for given
> 'breakpoint' address.
>
> The idea is that the 'breakpoint' address calls the user space
> trampoline which executes the uprobe syscall.
>
> The syscall handler reads the return address of the initial call
> to retrieve the original 'breakpoint' address. With this address
> we find the related uprobe object and call its consumers.
>
> Adding the arch_uprobe_trampoline_mapping function that provides
> uprobe trampoline mapping. This mapping is backed with one global
> page initialized at __init time and shared by the all the mapping
> instances.
>
> We do not allow to execute uprobe syscall if the caller is not
> from uprobe trampoline mapping.
>
> The uprobe syscall ensures the consumer (bpf program) sees registers
> values in the state before the trampoline was called.
>
> Acked-by: Andrii Nakryiko <andrii@...nel.org>
> Acked-by: Oleg Nesterov <oleg@...hat.com>
> Signed-off-by: Jiri Olsa <jolsa@...nel.org>
> ---
> arch/x86/entry/syscalls/syscall_64.tbl | 1 +
> arch/x86/kernel/uprobes.c | 122 +++++++++++++++++++++++++
> include/linux/syscalls.h | 2 +
> include/linux/uprobes.h | 1 +
> kernel/events/uprobes.c | 17 ++++
> kernel/sys_ni.c | 1 +
> 6 files changed, 144 insertions(+)
>
> diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
> index cfb5ca41e30d..9fd1291e7bdf 100644
> --- a/arch/x86/entry/syscalls/syscall_64.tbl
> +++ b/arch/x86/entry/syscalls/syscall_64.tbl
> @@ -345,6 +345,7 @@
> 333 common io_pgetevents sys_io_pgetevents
> 334 common rseq sys_rseq
> 335 common uretprobe sys_uretprobe
> +336 common uprobe sys_uprobe
> # don't use numbers 387 through 423, add new calls after the last
> # 'common' entry
> 424 common pidfd_send_signal sys_pidfd_send_signal
> diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
> index 6c4dcbdd0c3c..5eecab712376 100644
> --- a/arch/x86/kernel/uprobes.c
> +++ b/arch/x86/kernel/uprobes.c
> @@ -752,6 +752,128 @@ void arch_uprobe_clear_state(struct mm_struct *mm)
> hlist_for_each_entry_safe(tramp, n, &state->head_tramps, node)
> destroy_uprobe_trampoline(tramp);
> }
> +
> +static bool __in_uprobe_trampoline(unsigned long ip)
> +{
> + struct vm_area_struct *vma = vma_lookup(current->mm, ip);
> +
> + return vma && vma_is_special_mapping(vma, &tramp_mapping);
> +}
> +
> +static bool in_uprobe_trampoline(unsigned long ip)
> +{
> + struct mm_struct *mm = current->mm;
> + bool found, retry = true;
> + unsigned int seq;
> +
> + rcu_read_lock();
> + if (mmap_lock_speculate_try_begin(mm, &seq)) {
> + found = __in_uprobe_trampoline(ip);
> + retry = mmap_lock_speculate_retry(mm, seq);
> + }
> + rcu_read_unlock();
> +
> + if (retry) {
> + mmap_read_lock(mm);
> + found = __in_uprobe_trampoline(ip);
> + mmap_read_unlock(mm);
> + }
> + return found;
> +}
> +
> +SYSCALL_DEFINE0(uprobe)
> +{
> + struct pt_regs *regs = task_pt_regs(current);
> + unsigned long ip, sp, ax_r11_cx_ip[4];
> + int err;
> +
> + /* Allow execution only from uprobe trampolines. */
> + if (!in_uprobe_trampoline(regs->ip))
> + goto sigill;
> +
/*
 * When the syscall is invoked from the trampoline (i.e. via a call into
 * the trampoline), the stack layout is:
 * regs->sp[0]: [rax]
 *         [1]: [r11]
 *         [2]: [rcx]
 *         [3]: [return-address] (probed address + sizeof(call-instruction))
 *
 * And `&regs->sp[4]` should be the `sp` value when the probe was hit.
 */
> + err = copy_from_user(ax_r11_cx_ip, (void __user *)regs->sp, sizeof(ax_r11_cx_ip));
> + if (err)
> + goto sigill;
> +
> + ip = regs->ip;
> +
> + /*
> + * expose the "right" values of ax/r11/cx/ip/sp to uprobe_consumer/s, plus:
> + * - adjust ip to the probe address, call saved next instruction address
> + * - adjust sp to the probe's stack frame (check trampoline code)
> + */
> + regs->ax = ax_r11_cx_ip[0];
> + regs->r11 = ax_r11_cx_ip[1];
> + regs->cx = ax_r11_cx_ip[2];
> + regs->ip = ax_r11_cx_ip[3] - 5;
> + regs->sp += sizeof(ax_r11_cx_ip);
> + regs->orig_ax = -1;
> +
> + sp = regs->sp;
> +
> + handle_syscall_uprobe(regs, regs->ip);
> +
> + /*
> + * Some of the uprobe consumers has changed sp, we can do nothing,
> + * just return via iret.
> + */
Do we allow consumers to change the `sp`? It seems dangerous
because the consumer needs to know whether it was called from a
breakpoint or from the syscall. Note that it has to set up ax, r11
and cx on the stack correctly only if it was called from the syscall,
which is not compatible with breakpoint mode.
> + if (regs->sp != sp)
> + return regs->ax;
Shouldn't we recover regs->ip? Or in this case does the consumer have
to change ip (== return address from the trampoline) too?
IMHO, it should not be allowed to change the `sp` and `ip` directly
in syscall mode. In the case of kprobes, kprobe jump optimization
must be disabled explicitly (e.g. by setting a dummy post_handler)
if the handler changes `ip`.
Or, even if modifying `sp` and `ip` is allowed, it should be assisted
by this function, e.g. stack up the dummy regs->ax/r11/cx on the
new stack at the new `regs->sp`. This would allow modifying those
registers transparently, the same as in breakpoint mode.
In this case, I think we just need to remove above 2 lines.
> +
> + regs->sp -= sizeof(ax_r11_cx_ip);
> +
> + /* for the case uprobe_consumer has changed ax/r11/cx */
> + ax_r11_cx_ip[0] = regs->ax;
> + ax_r11_cx_ip[1] = regs->r11;
> + ax_r11_cx_ip[2] = regs->cx;
> +
> + /* keep return address unless we are instructed otherwise */
> + if (ax_r11_cx_ip[3] - 5 != regs->ip)
> + ax_r11_cx_ip[3] = regs->ip;
> +
> + regs->ip = ip;
> +
> + err = copy_to_user((void __user *)regs->sp, ax_r11_cx_ip, sizeof(ax_r11_cx_ip));
> + if (err)
> + goto sigill;
... because above does everything what we need.
Thank you,
> +
> + /* ensure sysret, see do_syscall_64() */
> + regs->r11 = regs->flags;
> + regs->cx = regs->ip;
> + return 0;
> +
> +sigill:
> + force_sig(SIGILL);
> + return -1;
> +}
> +
> +asm (
> + ".pushsection .rodata\n"
> + ".balign " __stringify(PAGE_SIZE) "\n"
> + "uprobe_trampoline_entry:\n"
> + "push %rcx\n"
> + "push %r11\n"
> + "push %rax\n"
> + "movq $" __stringify(__NR_uprobe) ", %rax\n"
> + "syscall\n"
> + "pop %rax\n"
> + "pop %r11\n"
> + "pop %rcx\n"
> + "ret\n"
> + ".balign " __stringify(PAGE_SIZE) "\n"
> + ".popsection\n"
> +);
> +
> +extern u8 uprobe_trampoline_entry[];
> +
> +static int __init arch_uprobes_init(void)
> +{
> + tramp_mapping_pages[0] = virt_to_page(uprobe_trampoline_entry);
> + return 0;
> +}
> +
> +late_initcall(arch_uprobes_init);
> +
> #else /* 32-bit: */
> /*
> * No RIP-relative addressing on 32-bit
> diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
> index e5603cc91963..b0cc60f1c458 100644
> --- a/include/linux/syscalls.h
> +++ b/include/linux/syscalls.h
> @@ -998,6 +998,8 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int on);
>
> asmlinkage long sys_uretprobe(void);
>
> +asmlinkage long sys_uprobe(void);
> +
> /* pciconfig: alpha, arm, arm64, ia64, sparc */
> asmlinkage long sys_pciconfig_read(unsigned long bus, unsigned long dfn,
> unsigned long off, unsigned long len,
> diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
> index b40d33aae016..b6b077cc7d0f 100644
> --- a/include/linux/uprobes.h
> +++ b/include/linux/uprobes.h
> @@ -239,6 +239,7 @@ extern unsigned long uprobe_get_trampoline_vaddr(void);
> extern void uprobe_copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len);
> extern void arch_uprobe_clear_state(struct mm_struct *mm);
> extern void arch_uprobe_init_state(struct mm_struct *mm);
> +extern void handle_syscall_uprobe(struct pt_regs *regs, unsigned long bp_vaddr);
> #else /* !CONFIG_UPROBES */
> struct uprobes_state {
> };
> diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
> index acec91a676b7..cbba31c0495f 100644
> --- a/kernel/events/uprobes.c
> +++ b/kernel/events/uprobes.c
> @@ -2772,6 +2772,23 @@ static void handle_swbp(struct pt_regs *regs)
> rcu_read_unlock_trace();
> }
>
> +void handle_syscall_uprobe(struct pt_regs *regs, unsigned long bp_vaddr)
> +{
> + struct uprobe *uprobe;
> + int is_swbp;
> +
> + guard(rcu_tasks_trace)();
> +
> + uprobe = find_active_uprobe_rcu(bp_vaddr, &is_swbp);
> + if (!uprobe)
> + return;
> + if (!get_utask())
> + return;
> + if (arch_uprobe_ignore(&uprobe->arch, regs))
> + return;
> + handler_chain(uprobe, regs);
> +}
> +
> /*
> * Perform required fix-ups and disable singlestep.
> * Allow pending signals to take effect.
> diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
> index c00a86931f8c..bf5d05c635ff 100644
> --- a/kernel/sys_ni.c
> +++ b/kernel/sys_ni.c
> @@ -392,3 +392,4 @@ COND_SYSCALL(setuid16);
> COND_SYSCALL(rseq);
>
> COND_SYSCALL(uretprobe);
> +COND_SYSCALL(uprobe);
> --
> 2.50.0
>
--
Masami Hiramatsu (Google) <mhiramat@...nel.org>
Powered by blists - more mailing lists