Message-Id: <1556880164-10689-8-git-send-email-jiong.wang@netronome.com>
Date: Fri, 3 May 2019 11:42:34 +0100
From: Jiong Wang <jiong.wang@...ronome.com>
To: alexei.starovoitov@...il.com, daniel@...earbox.net
Cc: bpf@...r.kernel.org, netdev@...r.kernel.org,
oss-drivers@...ronome.com, Jiong Wang <jiong.wang@...ronome.com>
Subject: [PATCH v6 bpf-next 07/17] bpf: verifier: randomize high 32-bit when BPF_F_TEST_RND_HI32 is set
This patch randomizes the high 32 bits of a definition when
BPF_F_TEST_RND_HI32 is set.

It does this whenever the flag is set, regardless of whether hardware
zero extension support is available, because this is a test feature and
we want to deliver the most stressful test.
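
For illustration only (rnd_hi32_poison below is a hypothetical helper,
not part of the patch), the inserted sequence

  BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd)      /* AX = random 32-bit value */
  BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32)           /* AX <<= 32                */
  BPF_ALU64_REG(BPF_OR, insn.dst_reg, BPF_REG_AX)  /* dst |= AX                */

has the same effect on the destination register as this C sketch:

  #include <stdint.h>

  /* Poison the high 32 bits of a register that was just written by a
   * 32-bit (sub-register) definition whose low 32 bits are dst_lo32.
   */
  static inline uint64_t rnd_hi32_poison(uint64_t dst_lo32, uint32_t imm_rnd)
  {
          return dst_lo32 | ((uint64_t)imm_rnd << 32);
  }

so any code that wrongly assumes the sub-register write left the high
32 bits zeroed will observe random garbage there instead.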
Suggested-by: Alexei Starovoitov <ast@...nel.org>
Signed-off-by: Jiong Wang <jiong.wang@...ronome.com>
---
kernel/bpf/verifier.c | 69 +++++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 58 insertions(+), 11 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 999da02..31ffbef 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -7648,32 +7648,79 @@ static int opt_remove_nops(struct bpf_verifier_env *env)
return 0;
}
-static int opt_subreg_zext_lo32(struct bpf_verifier_env *env)
+static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
+ const union bpf_attr *attr)
{
+ struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
struct bpf_insn_aux_data *aux = env->insn_aux_data;
+ int i, patch_len, delta = 0, len = env->prog->len;
struct bpf_insn *insns = env->prog->insnsi;
- int i, delta = 0, len = env->prog->len;
- struct bpf_insn zext_patch[2];
struct bpf_prog *new_prog;
+ bool rnd_hi32;
+
+ rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
zext_patch[1] = BPF_ALU32_IMM(BPF_ZEXT, 0, 0);
+ rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
+ rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
+ rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
for (i = 0; i < len; i++) {
int adj_idx = i + delta;
struct bpf_insn insn;
- if (!aux[adj_idx].zext_dst)
+ insn = insns[adj_idx];
+ if (!aux[adj_idx].zext_dst) {
+ u8 code, class;
+ u32 imm_rnd;
+
+ if (!rnd_hi32)
+ continue;
+
+ code = insn.code;
+ class = BPF_CLASS(code);
+ if (insn_no_def(&insn))
+ continue;
+
+ /* NOTE: arg "reg" (the fourth one) is only used for
+ * BPF_STX which has been ruled out in above
+ * check, it is safe to pass NULL here.
+ */
+ if (is_reg64(env, &insn, insn.dst_reg, NULL, DST_OP)) {
+ if (class == BPF_LD &&
+ BPF_MODE(code) == BPF_IMM)
+ i++;
+ continue;
+ }
+
+ /* ctx load could be transformed into wider load. */
+ if (class == BPF_LDX &&
+ aux[adj_idx].ptr_type == PTR_TO_CTX)
+ continue;
+
+ imm_rnd = get_random_int();
+ rnd_hi32_patch[0] = insn;
+ rnd_hi32_patch[1].imm = imm_rnd;
+ rnd_hi32_patch[3].dst_reg = insn.dst_reg;
+ patch = rnd_hi32_patch;
+ patch_len = 4;
+ goto apply_patch_buffer;
+ }
+
+ if (bpf_jit_hardware_zext())
continue;
- insn = insns[adj_idx];
zext_patch[0] = insn;
zext_patch[1].dst_reg = insn.dst_reg;
- new_prog = bpf_patch_insn_data(env, adj_idx, zext_patch, 2);
+ patch = zext_patch;
+ patch_len = 2;
+apply_patch_buffer:
+ new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
if (!new_prog)
return -ENOMEM;
env->prog = new_prog;
insns = new_prog->insnsi;
aux = env->insn_aux_data;
- delta += 2;
+ delta += patch_len - 1;
}
return 0;
@@ -8533,10 +8580,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
/* do 32-bit optimization after insn patching has done so those patched
* insns could be handled correctly.
*/
- if (ret == 0 && !bpf_jit_hardware_zext() &&
- !bpf_prog_is_dev_bound(env->prog->aux)) {
- ret = opt_subreg_zext_lo32(env);
- env->prog->aux->verifier_zext = !ret;
+ if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) {
+ ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
+ env->prog->aux->verifier_zext =
+ bpf_jit_hardware_zext() ? false : !ret;
}
if (ret == 0)
--
2.7.4