Message-Id: <1553623539-15474-8-git-send-email-jiong.wang@netronome.com>
Date: Tue, 26 Mar 2019 18:05:30 +0000
From: Jiong Wang <jiong.wang@...ronome.com>
To: alexei.starovoitov@...il.com, daniel@...earbox.net
Cc: bpf@...r.kernel.org, netdev@...r.kernel.org,
oss-drivers@...ronome.com, Jiong Wang <jiong.wang@...ronome.com>
Subject: [PATCH/RFC bpf-next 07/16] bpf: insert explicit zero extension instructions when bpf_jit_32bit_opt is true
This patch implements the zero extension insertion pass using the
bpf_patch_insn_data infrastructure.

Once zero extensions have been inserted, JIT back-ends are told about it
through the new boolean field "no_verifier_zext" in bpf_prog_aux. This is
needed because users can enable or disable the insertion pass at will
through the sysctl variable.
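
As an illustration (the register numbers and the 32-bit instruction below
are arbitrary, chosen only for the example): for an instruction that
writes a sub-register whose value is later read as 64-bit, e.g.

  w1 = w2

the pass patches two ALU64 shifts in right after it:

  w1 = w2
  r1 <<= 32
  r1 >>= 32

which explicitly clears the high 32 bits of the destination register.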
Reviewed-by: Jakub Kicinski <jakub.kicinski@...ronome.com>
Signed-off-by: Jiong Wang <jiong.wang@...ronome.com>
---
include/linux/bpf.h | 1 +
kernel/bpf/verifier.c | 45 ++++++++++++++++++++++++++++++++++++++++++++-
2 files changed, 45 insertions(+), 1 deletion(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 5616a58..3336f93 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -359,6 +359,7 @@ struct bpf_prog_aux {
u32 id;
u32 func_cnt; /* used by non-func prog as the number of func progs */
u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
+ bool no_verifier_zext; /* No zero extension insertion by verifier. */
bool offload_requested;
struct bpf_prog **func;
void *jit_data; /* JIT specific data. arch dependent */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 83448bb..57db451 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -7224,6 +7224,38 @@ static int opt_remove_nops(struct bpf_verifier_env *env)
return 0;
}
+static int opt_subreg_zext(struct bpf_verifier_env *env)
+{
+ struct bpf_insn_aux_data *aux = env->insn_aux_data;
+ int i, delta = 0, len = env->prog->len;
+ struct bpf_insn *insns = env->prog->insnsi;
+ struct bpf_insn zext_patch[3];
+ struct bpf_prog *new_prog;
+
+ zext_patch[1] = BPF_ALU64_IMM(BPF_LSH, 0, 32);
+ zext_patch[2] = BPF_ALU64_IMM(BPF_RSH, 0, 32);
+ for (i = 0; i < len; i++) {
+ struct bpf_insn insn;
+
+ if (!aux[i + delta].zext_dst)
+ continue;
+
+ insn = insns[i + delta];
+ zext_patch[0] = insn;
+ zext_patch[1].dst_reg = insn.dst_reg;
+ zext_patch[2].dst_reg = insn.dst_reg;
+ new_prog = bpf_patch_insn_data(env, i + delta, zext_patch, 3);
+ if (!new_prog)
+ return -ENOMEM;
+ env->prog = new_prog;
+ insns = new_prog->insnsi;
+ aux = env->insn_aux_data;
+ delta += 2;
+ }
+
+ return 0;
+}
+
/* convert load instructions that access fields of a context type into a
* sequence of instructions that access fields of the underlying structure:
* struct __sk_buff -> struct sk_buff
@@ -8022,7 +8054,18 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
if (ret == 0)
ret = check_max_stack_depth(env);
- /* instruction rewrites happen after this point */
+ /* Instruction rewrites happen after this point.
+	 * For offload targets, the finalize hook has all aux insn info; do
+	 * any customized work there.
+ */
+ if (ret == 0 && bpf_jit_32bit_opt &&
+ !bpf_prog_is_dev_bound(env->prog->aux)) {
+ ret = opt_subreg_zext(env);
+ env->prog->aux->no_verifier_zext = !!ret;
+ } else {
+ env->prog->aux->no_verifier_zext = true;
+ }
+
if (is_priv) {
if (ret == 0)
opt_hard_wire_dead_code_branches(env);
--
2.7.4