Message-ID: <CAEyhmHTA+6RdD4CbQuMn2E887Z3E6RudJQb3Wnmqosj1ozrXPw@mail.gmail.com>
Date: Thu, 26 Jun 2025 09:39:04 +0800
From: Hengqi Chen <hengqi.chen@...il.com>
To: Chenghao Duan <duanchenghao@...inos.cn>
Cc: ast@...nel.org, daniel@...earbox.net, andrii@...nel.org,
yangtiezhu@...ngson.cn, chenhuacai@...nel.org, martin.lau@...ux.dev,
eddyz87@...il.com, song@...nel.org, yonghong.song@...ux.dev,
john.fastabend@...il.com, kpsingh@...nel.org, sdf@...ichev.me,
haoluo@...gle.com, jolsa@...nel.org, kernel@...0n.name,
linux-kernel@...r.kernel.org, loongarch@...ts.linux.dev, bpf@...r.kernel.org,
guodongtai@...inos.cn, youling.tang@...ux.dev, jianghaoran@...inos.cn,
Youling Tang <tangyouling@...inos.cn>
Subject: Re: [PATCH v2 1/4] LoongArch: BPF: The operation commands needed to
add a trampoline
On Wed, Jun 18, 2025 at 6:51 PM Chenghao Duan <duanchenghao@...inos.cn> wrote:
>
> Add branch jump function:
> larch_insn_gen_beq
> larch_insn_gen_bne
>
> Add instruction copy function: larch_insn_text_copy
>
Please rewrite the commit message properly.
These functions are generic, so you can drop the `BPF` prefix from the subject line.
> Co-developed-by: George Guo <guodongtai@...inos.cn>
> Signed-off-by: George Guo <guodongtai@...inos.cn>
> Co-developed-by: Youling Tang <tangyouling@...inos.cn>
> Signed-off-by: Youling Tang <tangyouling@...inos.cn>
> Signed-off-by: Chenghao Duan <duanchenghao@...inos.cn>
> ---
> arch/loongarch/include/asm/inst.h | 3 ++
> arch/loongarch/kernel/inst.c | 57 +++++++++++++++++++++++++++++++
> 2 files changed, 60 insertions(+)
>
> diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
> index 3089785ca..88bb73e46 100644
> --- a/arch/loongarch/include/asm/inst.h
> +++ b/arch/loongarch/include/asm/inst.h
> @@ -497,6 +497,7 @@ void arch_simulate_insn(union loongarch_instruction insn, struct pt_regs *regs);
> int larch_insn_read(void *addr, u32 *insnp);
> int larch_insn_write(void *addr, u32 insn);
> int larch_insn_patch_text(void *addr, u32 insn);
> +int larch_insn_text_copy(void *dst, void *src, size_t len);
>
> u32 larch_insn_gen_nop(void);
> u32 larch_insn_gen_b(unsigned long pc, unsigned long dest);
> @@ -511,6 +512,8 @@ u32 larch_insn_gen_lu12iw(enum loongarch_gpr rd, int imm);
> u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm);
> u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm);
> u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm);
> +u32 larch_insn_gen_beq(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm);
> +u32 larch_insn_gen_bne(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm);
>
> static inline bool signed_imm_check(long val, unsigned int bit)
> {
> diff --git a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c
> index 14d7d700b..7423b0772 100644
> --- a/arch/loongarch/kernel/inst.c
> +++ b/arch/loongarch/kernel/inst.c
> @@ -4,6 +4,7 @@
> */
> #include <linux/sizes.h>
> #include <linux/uaccess.h>
> +#include <linux/set_memory.h>
>
> #include <asm/cacheflush.h>
> #include <asm/inst.h>
> @@ -218,6 +219,34 @@ int larch_insn_patch_text(void *addr, u32 insn)
> return ret;
> }
>
> +int larch_insn_text_copy(void *dst, void *src, size_t len)
> +{
> + unsigned long flags;
Initialize flags ?
> + size_t wlen = 0;
> + size_t size;
> + void *ptr;
> + int ret = 0;
> +
> + set_memory_rw((unsigned long)dst, round_up(len, PAGE_SIZE) / PAGE_SIZE);
> + raw_spin_lock_irqsave(&patch_lock, flags);
> + while (wlen < len) {
> + ptr = dst + wlen;
> + size = min_t(size_t, PAGE_SIZE - offset_in_page(ptr),
> + len - wlen);
> +
> + ret = copy_to_kernel_nofault(ptr, src + wlen, size);
I am not familiar with this mm thing, but looking at other callsites
of copy_to_kernel_nofault(), it seems like this copy can cross page
boundaries, so the page-by-page loop here may be unnecessary.
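If so, the whole loop can probably be collapsed into a single call,
something like this (untested sketch, assuming the destination range
has already been made writable by the set_memory_rw() call above):

	raw_spin_lock_irqsave(&patch_lock, flags);
	ret = copy_to_kernel_nofault(dst, src, len);
	if (ret)
		pr_err("%s: operation failed\n", __func__);
	raw_spin_unlock_irqrestore(&patch_lock, flags);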
> + if (ret) {
> + pr_err("%s: operation failed\n", __func__);
> + break;
> + }
> + wlen += size;
> + }
> + raw_spin_unlock_irqrestore(&patch_lock, flags);
> + set_memory_rox((unsigned long)dst, round_up(len, PAGE_SIZE) / PAGE_SIZE);
> +
Do we need flush_icache_range() here ?
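If needed, something like this after the final set_memory_rox() should
do (untested, similar to what larch_insn_patch_text() already does):

	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);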
> + return ret;
> +}
> +
> u32 larch_insn_gen_nop(void)
> {
> return INSN_NOP;
> @@ -336,3 +365,31 @@ u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm)
>
> return insn.word;
> }
> +
> +u32 larch_insn_gen_beq(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm)
> +{
> + union loongarch_instruction insn;
> +
> + if ((imm & 3) || imm < -SZ_128K || imm >= SZ_128K) {
> + pr_warn("The generated beq instruction is out of range.\n");
> + return INSN_BREAK;
> + }
> +
> + emit_beq(&insn, rd, rj, imm >> 2);
> +
This does NOT match emit_beq()'s signature; it should be:
emit_beq(&insn, rj, rd, imm >> 2);
> + return insn.word;
> +}
> +
> +u32 larch_insn_gen_bne(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm)
> +{
> + union loongarch_instruction insn;
> +
> + if ((imm & 3) || imm < -SZ_128K || imm >= SZ_128K) {
> + pr_warn("The generated bne instruction is out of range.\n");
> + return INSN_BREAK;
> + }
> +
> + emit_bne(&insn, rj, rd, imm >> 2);
> +
> + return insn.word;
> +}
> --
> 2.43.0
>