[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-ID: <20220104014236.1512639-1-houtao1@huawei.com>
Date: Tue, 4 Jan 2022 09:42:36 +0800
From: Hou Tao <houtao1@...wei.com>
To: Alexei Starovoitov <ast@...nel.org>
CC: Martin KaFai Lau <kafai@...com>, Yonghong Song <yhs@...com>,
Daniel Borkmann <daniel@...earbox.net>,
Andrii Nakryiko <andrii@...nel.org>,
"David S . Miller" <davem@...emloft.net>,
Jakub Kicinski <kuba@...nel.org>, <netdev@...r.kernel.org>,
<bpf@...r.kernel.org>, <houtao1@...wei.com>,
Zi Shen Lim <zlim.lnx@...il.com>,
Catalin Marinas <catalin.marinas@....com>,
Will Deacon <will@...nel.org>,
<linux-arm-kernel@...ts.infradead.org>
Subject: [PATCH bpf] bpf, arm64: calculate offset as byte-offset for bpf line info
The bpf line info for arm64 is broken for two reasons:
(1) insn_to_jit_off passed to bpf_prog_fill_jited_linfo() is
calculated at instruction granularity instead of byte
granularity.
(2) insn_to_jit_off only considers the body itself and ignores
the prologue before the body.
So fix it by calculating the offset as a byte-offset and doing
build_prologue() first in the first JIT pass.
Fixes: 37ab566c178d ("bpf: arm64: Enable arm64 jit to provide bpf_line_info")
Signed-off-by: Hou Tao <houtao1@...wei.com>
---
arch/arm64/net/bpf_jit_comp.c | 27 +++++++++++++++++----------
1 file changed, 17 insertions(+), 10 deletions(-)
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 148ca51325bb..d7a6d4b523c9 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -24,6 +24,8 @@
#include "bpf_jit.h"
+#define INSN_SZ (sizeof(u32))
+
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
@@ -154,10 +156,11 @@ static inline int bpf2a64_offset(int bpf_insn, int off,
bpf_insn++;
/*
* Whereas arm64 branch instructions encode the offset
- * from the branch itself, so we must subtract 1 from the
+ * from the branch itself, so we must subtract 4 from the
* instruction offset.
*/
- return ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1);
+ return (ctx->offset[bpf_insn + off] -
+ (ctx->offset[bpf_insn] - INSN_SZ)) / INSN_SZ;
}
static void jit_fill_hole(void *area, unsigned int size)
@@ -955,13 +958,14 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
const struct bpf_insn *insn = &prog->insnsi[i];
int ret;
+ /* BPF line info needs byte-offset instead of insn-offset */
if (ctx->image == NULL)
- ctx->offset[i] = ctx->idx;
+ ctx->offset[i] = ctx->idx * INSN_SZ;
ret = build_insn(insn, ctx, extra_pass);
if (ret > 0) {
i++;
if (ctx->image == NULL)
- ctx->offset[i] = ctx->idx;
+ ctx->offset[i] = ctx->idx * INSN_SZ;
continue;
}
if (ret)
@@ -973,7 +977,7 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
* instruction (end of program)
*/
if (ctx->image == NULL)
- ctx->offset[i] = ctx->idx;
+ ctx->offset[i] = ctx->idx * INSN_SZ;
return 0;
}
@@ -1058,15 +1062,18 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
goto out_off;
}
- /* 1. Initial fake pass to compute ctx->idx. */
-
- /* Fake pass to fill in ctx->offset. */
- if (build_body(&ctx, extra_pass)) {
+ /*
+ * 1. Initial fake pass to compute ctx->idx and ctx->offset.
+ *
+ * BPF line info needs ctx->offset[i] to be the byte offset
+ * of instruction[i] in jited image, so build prologue first.
+ */
+ if (build_prologue(&ctx, was_classic)) {
prog = orig_prog;
goto out_off;
}
- if (build_prologue(&ctx, was_classic)) {
+ if (build_body(&ctx, extra_pass)) {
prog = orig_prog;
goto out_off;
}
--
2.27.0
Powered by blists - more mailing lists