Date:   Fri, 22 Apr 2022 12:54:02 +0200
From:   Jakub Sitnicki <jakub@...udflare.com>
To:     Xu Kuohai <xukuohai@...wei.com>
Cc:     bpf@...r.kernel.org, linux-arm-kernel@...ts.infradead.org,
        linux-kernel@...r.kernel.org, netdev@...r.kernel.org,
        linux-kselftest@...r.kernel.org,
        Catalin Marinas <catalin.marinas@....com>,
        Will Deacon <will@...nel.org>,
        Steven Rostedt <rostedt@...dmis.org>,
        Ingo Molnar <mingo@...hat.com>,
        Daniel Borkmann <daniel@...earbox.net>,
        Alexei Starovoitov <ast@...nel.org>,
        Zi Shen Lim <zlim.lnx@...il.com>,
        Andrii Nakryiko <andrii@...nel.org>,
        Martin KaFai Lau <kafai@...com>,
        Song Liu <songliubraving@...com>, Yonghong Song <yhs@...com>,
        John Fastabend <john.fastabend@...il.com>,
        KP Singh <kpsingh@...nel.org>,
        "David S . Miller" <davem@...emloft.net>,
        Hideaki YOSHIFUJI <yoshfuji@...ux-ipv6.org>,
        David Ahern <dsahern@...nel.org>,
        Thomas Gleixner <tglx@...utronix.de>,
        Borislav Petkov <bp@...en8.de>,
        Dave Hansen <dave.hansen@...ux.intel.com>, x86@...nel.org,
        hpa@...or.com, Shuah Khan <shuah@...nel.org>,
        Mark Rutland <mark.rutland@....com>,
        Ard Biesheuvel <ardb@...nel.org>,
        Pasha Tatashin <pasha.tatashin@...een.com>,
        Peter Collingbourne <pcc@...gle.com>,
        Daniel Kiss <daniel.kiss@....com>,
        Sudeep Holla <sudeep.holla@....com>,
        Steven Price <steven.price@....com>,
        Marc Zyngier <maz@...nel.org>, Mark Brown <broonie@...nel.org>,
        Kumar Kartikeya Dwivedi <memxor@...il.com>,
        Delyan Kratunov <delyank@...com>, kernel-team@...udflare.com
Subject: Re: [PATCH bpf-next v2 4/6] bpf, arm64: Implement
 bpf_arch_text_poke() for arm64

Hi Xu,

Thanks for working on this.

We are also looking forward to using fentry hooks on arm64. In
particular, to attaching to the entry/exit of XDP progs.

On Thu, Apr 14, 2022 at 12:22 PM -04, Xu Kuohai wrote:
> Implement bpf_arch_text_poke() for arm64, so bpf trampoline code can use
> it to replace a nop with a jump, or a jump with a nop.
>
> Signed-off-by: Xu Kuohai <xukuohai@...wei.com>
> Acked-by: Song Liu <songliubraving@...com>
> ---
>  arch/arm64/net/bpf_jit_comp.c | 52 +++++++++++++++++++++++++++++++++++
>  1 file changed, 52 insertions(+)
>
> diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
> index 8ab4035dea27..1a1c3ea75ee2 100644
> --- a/arch/arm64/net/bpf_jit_comp.c
> +++ b/arch/arm64/net/bpf_jit_comp.c
> @@ -9,6 +9,7 @@
>  
>  #include <linux/bitfield.h>
>  #include <linux/bpf.h>
> +#include <linux/memory.h>
>  #include <linux/filter.h>
>  #include <linux/printk.h>
>  #include <linux/slab.h>
> @@ -18,6 +19,7 @@
>  #include <asm/cacheflush.h>
>  #include <asm/debug-monitors.h>
>  #include <asm/insn.h>
> +#include <asm/patching.h>
>  #include <asm/set_memory.h>
>  
>  #include "bpf_jit.h"
> @@ -1529,3 +1531,53 @@ void bpf_jit_free_exec(void *addr)
>  {
>  	return vfree(addr);
>  }
> +
> +static int gen_branch_or_nop(enum aarch64_insn_branch_type type, void *ip,
> +			     void *addr, u32 *insn)
> +{
> +	if (!addr)
> +		*insn = aarch64_insn_gen_nop();
> +	else
> +		*insn = aarch64_insn_gen_branch_imm((unsigned long)ip,
> +						    (unsigned long)addr,
> +						    type);
> +
> +	return *insn != AARCH64_BREAK_FAULT ? 0 : -EFAULT;
> +}
> +
> +int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
> +		       void *old_addr, void *new_addr)
> +{
> +	int ret;
> +	u32 old_insn;
> +	u32 new_insn;
> +	u32 replaced;
> +	enum aarch64_insn_branch_type branch_type;
> +
> +	if (poke_type == BPF_MOD_CALL)
> +		branch_type = AARCH64_INSN_BRANCH_LINK;

This path, bpf_arch_text_poke(<ip>, BPF_MOD_CALL, ...), is what we hit
when attaching to a BPF program's entry. It is exercised by selftest
#232 xdp_bpf2bpf.

However, with this patchset alone it will not work, because we don't
yet emit the ftrace patch site (MOV X9, LR; NOP) as part of the BPF
prog prologue, the way ftrace_init_nop() does. So the patching attempt
will fail.
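
To spell out my understanding of the expected layout (mirroring a
regular ftrace callsite; <trampoline> stands for whatever gets
attached):

	detached:             attached:
	  mov x9, lr            mov x9, lr
	  nop                   bl  <trampoline>
	  paciasp               paciasp
	  ...                   ...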

I think that is what you were referring to in your reply to Hou [1].

So my question is - is support for attaching to BPF progs in scope for
this patchset?

If not, then perhaps it would be better for now to fail early with
something like -EOPNOTSUPP when poke_type is BPF_MOD_CALL, rather than
attempting to patch the code.
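
Something along these lines at the top of bpf_arch_text_poke()
(untested sketch):

	if (poke_type == BPF_MOD_CALL)
		return -EOPNOTSUPP;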

If you do plan to enable it as part of this patchset, then FWIW I've
given it a quick try, and it seems that not a lot is needed to get
fentry-to-BPF-prog attachment to work.

I'm including the diff for my quick and dirty attempt below. With that
patch on top, the xdp_bpf2bpf tests pass:

#232 xdp_bpf2bpf:OK
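
(for the record, that's ./test_progs -t xdp_bpf2bpf run from
tools/testing/selftests/bpf)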

[1] https://lore.kernel.org/bpf/d8c4f1fb-a020-9457-44e2-dc63982a9213@huawei.com/

> +	else
> +		branch_type = AARCH64_INSN_BRANCH_NOLINK;
> +
> +	if (gen_branch_or_nop(branch_type, ip, old_addr, &old_insn) < 0)
> +		return -EFAULT;
> +
> +	if (gen_branch_or_nop(branch_type, ip, new_addr, &new_insn) < 0)
> +		return -EFAULT;
> +
> +	mutex_lock(&text_mutex);
> +	if (aarch64_insn_read(ip, &replaced)) {
> +		ret = -EFAULT;
> +		goto out;
> +	}
> +
> +	if (replaced != old_insn) {
> +		ret = -EFAULT;
> +		goto out;
> +	}
> +
> +	ret = aarch64_insn_patch_text_nosync((void *)ip, new_insn);
> +out:
> +	mutex_unlock(&text_mutex);

The body of this critical section is identical to that of
ftrace_modify_code(). Perhaps we could export it and reuse it?
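
For illustration, assuming ftrace_modify_code() from
arch/arm64/kernel/ftrace.c were exported with its current signature,
(unsigned long pc, u32 old, u32 new, bool validate), the
read-compare-patch sequence could collapse to something like this
(error codes would differ slightly, if memory serves):

	mutex_lock(&text_mutex);
	ret = ftrace_modify_code((unsigned long)ip, old_insn, new_insn, true);
	mutex_unlock(&text_mutex);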

> +	return ret;
> +}

---
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 5f6bd755050f..94d8251500ab 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -240,9 +240,9 @@ static bool is_lsi_offset(int offset, int scale)
 /* Tail call offset to jump into */
 #if IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) || \
 	IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)
-#define PROLOGUE_OFFSET 9
+#define PROLOGUE_OFFSET 11
 #else
-#define PROLOGUE_OFFSET 8
+#define PROLOGUE_OFFSET 10
 #endif
 
 static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
@@ -281,6 +281,10 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
 	 *
 	 */
 
+	/* Set up ftrace patch (initially in disabled state) */
+	emit(A64_MOV(1, A64_R(9), A64_LR), ctx);
+	emit(A64_NOP, ctx);
+
 	/* Sign lr */
 	if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
 		emit(A64_PACIASP, ctx);
@@ -1888,10 +1892,16 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
 	u32 replaced;
 	enum aarch64_insn_branch_type branch_type;
 
-	if (poke_type == BPF_MOD_CALL)
+	if (poke_type == BPF_MOD_CALL) {
 		branch_type = AARCH64_INSN_BRANCH_LINK;
-	else
+		/*
+		 * Adjust ip to point at the BL in the callsite.
+		 * See ftrace_init_nop() for the callsite sequence.
+		 */
+		ip = (void *)((unsigned long)ip + AARCH64_INSN_SIZE);
+	} else {
 		branch_type = AARCH64_INSN_BRANCH_NOLINK;
+	}
 
 	if (gen_branch_or_nop(branch_type, ip, old_addr, &old_insn) < 0)
 		return -EFAULT;

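(One more note on the diff above, in case it helps review: the
PROLOGUE_OFFSET bump from 9/8 to 11/10 simply accounts for the two
extra instructions, MOV X9, LR and NOP, now emitted ahead of the tail
call target.)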