Message-Id: <20250611035952.111182-4-duanchenghao@kylinos.cn>
Date: Wed, 11 Jun 2025 11:59:50 +0800
From: Chenghao Duan <duanchenghao@...inos.cn>
To: ast@...nel.org,
daniel@...earbox.net,
andrii@...nel.org,
martin.lau@...ux.dev,
eddyz87@...il.com,
song@...nel.org,
yonghong.song@...ux.dev,
john.fastabend@...il.com,
kpsingh@...nel.org,
sdf@...ichev.me,
haoluo@...gle.com,
jolsa@...nel.org,
yangtiezhu@...ngson.cn,
hengqi.chen@...il.com,
chenhuacai@...nel.org,
kernel@...0n.name,
linux-kernel@...r.kernel.org
Cc: guodongtai@...inos.cn,
duanchenghao@...inos.cn,
youling.tang@...ux.dev,
jianghaoran@...inos.cn
Subject: [PATCH v1 3/5] LoongArch: BPF: Add bpf_arch_text_poke support for LoongArch
Implement the bpf_arch_text_poke function for the LoongArch
architecture. On LoongArch, symbol addresses in the direct mapping
region cannot be reached with relative jump instructions from the
paged mapping region, so the move_imm+jirl instruction sequence is
used to perform an absolute jump. This sequence takes 2 to 5
instructions, so 5 NOP instructions are reserved in the program as
placeholders for the function jump.

Signed-off-by: George Guo <guodongtai@...inos.cn>
Signed-off-by: Chenghao Duan <duanchenghao@...inos.cn>
---
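For context, below is a minimal stand-alone user-space sketch of the
near/far jump decision described above. It is not the kernel code: the
addresses are hypothetical and the instruction count is only a rough
approximation of move_imm() (lu12i.w/ori/lu32i.d/lu52i.d), but it shows
why a move_imm+jirl sequence fits within the 5 reserved NOP slots.

#include <stdint.h>
#include <stdio.h>

#define SZ_128M		(128 * 1024 * 1024)

/*
 * Rough count of instructions needed to materialize a 64-bit address
 * in a register (simplified; the kernel's move_imm() elides more cases).
 */
static int count_move_imm(uint64_t addr)
{
	int n = 2;				/* lu12i.w + ori for bits [31:0] */

	if ((int64_t)addr != (int32_t)addr)
		n++;				/* lu32i.d for bits [51:32] */
	if ((int64_t)addr != ((int64_t)(addr << 12) >> 12))
		n++;				/* lu52i.d for bits [63:52] */
	return n;
}

/* Number of instruction slots a jump from @ip to @target would occupy */
static int jump_slots(uint64_t ip, uint64_t target)
{
	int64_t off = (int64_t)(target - ip);

	if (off >= -SZ_128M && off < SZ_128M)
		return 1;			/* a single relative branch reaches */
	return count_move_imm(target) + 1;	/* move_imm sequence + jirl */
}

int main(void)
{
	/* hypothetical addresses: JIT'ed code jumping to the direct mapping region */
	uint64_t ip = 0xffff800002000000ULL;
	uint64_t target = 0x9000000001234000ULL;

	printf("slots needed: %d (<= 5 reserved NOPs)\n", jump_slots(ip, target));
	return 0;
}
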
arch/loongarch/net/bpf_jit.c | 62 ++++++++++++++++++++++++++++++++++++
1 file changed, 62 insertions(+)
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index ea357a3ed..0e31d4d66 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -4,6 +4,7 @@
*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
+#include <linux/memory.h>
#include "bpf_jit.h"
#define REG_TCC LOONGARCH_GPR_A6
@@ -1351,3 +1352,64 @@ bool bpf_jit_supports_subprog_tailcalls(void)
{
return true;
}
+
+static int emit_jump_and_link(struct jit_ctx *ctx, u8 rd, u64 ip, u64 target)
+{
+ s64 offset = (s64)(target - ip);
+
+ if (offset && (offset >= -SZ_128M && offset < SZ_128M)) {
+ emit_insn(ctx, bl, offset >> 2);
+ } else {
+ move_imm(ctx, LOONGARCH_GPR_T1, target, false);
+ emit_insn(ctx, jirl, rd, LOONGARCH_GPR_T1, 0);
+ }
+
+ return 0;
+}
+
+static int gen_jump_or_nops(void *target, void *ip, u32 *insns, bool is_call)
+{
+ struct jit_ctx ctx;
+
+ ctx.idx = 0;
+ ctx.image = (union loongarch_instruction *)insns;
+
+ if (!target) {
+ emit_insn((&ctx), NOP);
+ emit_insn((&ctx), NOP);
+ return 0;
+ }
+
+ return emit_jump_and_link(&ctx, is_call ? LOONGARCH_GPR_T0 : LOONGARCH_GPR_ZERO,
+ (unsigned long)ip, (unsigned long)target);
+}
+
+int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
+ void *old_addr, void *new_addr)
+{
+ u32 old_insns[5] = {[0 ... 4] = INSN_NOP};
+ u32 new_insns[5] = {[0 ... 4] = INSN_NOP};
+ bool is_call = poke_type == BPF_MOD_CALL;
+ int ret;
+
+ if (!is_kernel_text((unsigned long)ip) &&
+ !is_bpf_text_address((unsigned long)ip))
+ return -ENOTSUPP;
+
+ ret = gen_jump_or_nops(old_addr, ip, old_insns, is_call);
+ if (ret)
+ return ret;
+
+ if (memcmp(ip, old_insns, 5 * 4))
+ return -EFAULT;
+
+ ret = gen_jump_or_nops(new_addr, ip, new_insns, is_call);
+ if (ret)
+ return ret;
+
+ mutex_lock(&text_mutex);
+ if (memcmp(ip, new_insns, 5 * 4))
+ ret = larch_insn_text_copy(ip, new_insns, 5 * 4);
+ mutex_unlock(&text_mutex);
+ return ret;
+}
--
2.25.1