Message-Id: <20240520071631.2980798-1-xiao.w.wang@intel.com>
Date: Mon, 20 May 2024 15:16:31 +0800
From: Xiao Wang <xiao.w.wang@...el.com>
To: paul.walmsley@...ive.com,
palmer@...belt.com,
aou@...s.berkeley.edu,
luke.r.nels@...il.com,
xi.wang@...il.com,
bjorn@...nel.org
Cc: ast@...nel.org,
daniel@...earbox.net,
andrii@...nel.org,
martin.lau@...ux.dev,
eddyz87@...il.com,
song@...nel.org,
yonghong.song@...ux.dev,
john.fastabend@...il.com,
kpsingh@...nel.org,
sdf@...gle.com,
haoluo@...gle.com,
jolsa@...nel.org,
linux-riscv@...ts.infradead.org,
linux-kernel@...r.kernel.org,
bpf@...r.kernel.org,
pulehui@...wei.com,
haicheng.li@...el.com,
Xiao Wang <xiao.w.wang@...el.com>
Subject: [PATCH] riscv, bpf: Introduce shift add helper with Zba optimization

The Zba extension is useful for generating addresses that index into
arrays of basic data types. Introduce sh2add and sh3add helpers for
RV32 and RV64, respectively, to accelerate pointer array addressing.
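
For example, in the RV64 tail call path converted below, one Zba
instruction replaces the two-instruction shift-and-add fallback
(registers as in the bpf_jit_comp64.c hunk):

	sh3add	t2, a2, a1	# t2 = a1 + (a2 << 3), with Zba

	slli	t2, a2, 3	# fallback without Zba
	add	t2, t2, a1
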
Signed-off-by: Xiao Wang <xiao.w.wang@...el.com>
---
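Reviewer note (below the fold, not part of the commit message): the
encodings follow the ratified Zba spec. For reference, the standard
R-type layout that rv_r_insn() is assumed to pack, with the argument
order inferred from its callers in this patch:

	 31          25 24   20 19   15 14     12 11   7 6      0
	| funct7 = 0x10 |  rs2  |  rs1  |  funct3  |  rd  |  0x33  |

	sh2add: funct3 = 0x4 (0b100)
	sh3add: funct3 = 0x6 (0b110)
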
 arch/riscv/net/bpf_jit.h        | 33 +++++++++++++++++++++++++++++++++
 arch/riscv/net/bpf_jit_comp32.c |  3 +--
 arch/riscv/net/bpf_jit_comp64.c |  3 +--
 3 files changed, 35 insertions(+), 4 deletions(-)

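The new emit helpers are gated on rvzba_enabled(), whose definition is
not part of this diff. For reviewers, a minimal sketch of what such a
gate could look like, assuming a CONFIG_RISCV_ISA_ZBA Kconfig option
and the kernel's riscv_has_extension_likely() helper (both are
assumptions here, not confirmed by this patch):

static inline bool rvzba_enabled(void)
{
	/* Sketch: compile-time opt-in plus runtime ISA detection. */
	return IS_ENABLED(CONFIG_RISCV_ISA_ZBA) &&
	       riscv_has_extension_likely(RISCV_ISA_EXT_ZBA);
}
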
diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h
index 18a7885ba95e..efe51821c463 100644
--- a/arch/riscv/net/bpf_jit.h
+++ b/arch/riscv/net/bpf_jit.h
@@ -740,6 +740,17 @@ static inline u16 rvc_swsp(u32 imm8, u8 rs2)
 	return rv_css_insn(0x6, imm, rs2, 0x2);
 }
 
+/* RVZBA instructions. */
+static inline u32 rvzba_sh2add(u8 rd, u8 rs1, u8 rs2)
+{
+	return rv_r_insn(0x10, rs2, rs1, 0x4, rd, 0x33);
+}
+
+static inline u32 rvzba_sh3add(u8 rd, u8 rs1, u8 rs2)
+{
+	return rv_r_insn(0x10, rs2, rs1, 0x6, rd, 0x33);
+}
+
 /* RVZBB instructions. */
 static inline u32 rvzbb_sextb(u8 rd, u8 rs1)
 {
@@ -1093,6 +1104,28 @@ static inline void emit_sw(u8 rs1, s32 off, u8 rs2, struct rv_jit_context *ctx)
 	emit(rv_sw(rs1, off, rs2), ctx);
 }
 
+static inline void emit_sh2add(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
+{
+	if (rvzba_enabled()) {
+		emit(rvzba_sh2add(rd, rs1, rs2), ctx);
+		return;
+	}
+
+	emit_slli(rd, rs1, 2, ctx);
+	emit_add(rd, rd, rs2, ctx);
+}
+
+static inline void emit_sh3add(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
+{
+	if (rvzba_enabled()) {
+		emit(rvzba_sh3add(rd, rs1, rs2), ctx);
+		return;
+	}
+
+	emit_slli(rd, rs1, 3, ctx);
+	emit_add(rd, rd, rs2, ctx);
+}
+
 /* RV64-only helper functions. */
 #if __riscv_xlen == 64
 
diff --git a/arch/riscv/net/bpf_jit_comp32.c b/arch/riscv/net/bpf_jit_comp32.c
index f5ba73bb153d..592dd86fbf81 100644
--- a/arch/riscv/net/bpf_jit_comp32.c
+++ b/arch/riscv/net/bpf_jit_comp32.c
@@ -811,8 +811,7 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
 	 * if (!prog)
 	 *	goto out;
 	 */
-	emit(rv_slli(RV_REG_T0, lo(idx_reg), 2), ctx);
-	emit(rv_add(RV_REG_T0, RV_REG_T0, lo(arr_reg)), ctx);
+	emit_sh2add(RV_REG_T0, lo(idx_reg), lo(arr_reg), ctx);
 	off = offsetof(struct bpf_array, ptrs);
 	if (is_12b_check(off, insn))
 		return -1;
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index aac190085472..39149ad002da 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -374,8 +374,7 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
 	 * if (!prog)
 	 *	goto out;
 	 */
-	emit_slli(RV_REG_T2, RV_REG_A2, 3, ctx);
-	emit_add(RV_REG_T2, RV_REG_T2, RV_REG_A1, ctx);
+	emit_sh3add(RV_REG_T2, RV_REG_A2, RV_REG_A1, ctx);
 	off = offsetof(struct bpf_array, ptrs);
 	if (is_12b_check(off, insn))
 		return -1;

base-commit: 92cce91949a497a8a4615f9ba5813b03f7a1f1d5
prerequisite-patch-id: f2b95de7f0f5ff170ecdf723154da7a61e1fc77e
--
2.25.1