Message-Id: <1553623539-15474-17-git-send-email-jiong.wang@netronome.com>
Date: Tue, 26 Mar 2019 18:05:39 +0000
From: Jiong Wang <jiong.wang@...ronome.com>
To: alexei.starovoitov@...il.com, daniel@...earbox.net
Cc: bpf@...r.kernel.org, netdev@...r.kernel.org,
oss-drivers@...ronome.com, Jiong Wang <jiong.wang@...ronome.com>
Subject: [PATCH/RFC bpf-next 16/16] selftests: bpf: unit testcases for zero extension insertion pass
This patch adds some unit testcases for the zero extension insertion pass.

There are a couple of code paths inside the verifier that do register
read/write marking; they are therefore the places that could trigger the
zero extension insertion logic. Create one test for each of them.

A couple of testcases for complex CFGs are also included. They cover
register read propagation during path pruning etc.
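For example (a sketch mirroring the "basic 1" testcase below), a 32-bit
sub-register def followed by a 64-bit read:

  BPF_MOV32_IMM(BPF_REG_0, 0),
  BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),

is expected to have an explicit zero extension (a lsh/rsh pair by 32)
inserted right after the def:

  BPF_MOV32_IMM(BPF_REG_0, 0),
  BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 32),
  BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
  BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),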
Signed-off-by: Jiong Wang <jiong.wang@...ronome.com>
---
tools/testing/selftests/bpf/verifier/zext.c | 651 ++++++++++++++++++++++++++++
1 file changed, 651 insertions(+)
create mode 100644 tools/testing/selftests/bpf/verifier/zext.c
diff --git a/tools/testing/selftests/bpf/verifier/zext.c b/tools/testing/selftests/bpf/verifier/zext.c
new file mode 100644
index 0000000..b45a429
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/zext.c
@@ -0,0 +1,651 @@
+/* There are a couple of code paths inside the verifier that do register
+ * read/write marking. Create one test for each of them.
+ */
+{
+ "zext: basic 1",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .xlated_insns = {
+ BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "zext: basic 2",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, 0x100000001ULL),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .xlated_insns = {
+ BPF_LD_IMM64(BPF_REG_0, 0x100000001ULL),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = -1,
+},
+{
+ "zext: basic 3",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .xlated_insns = {
+ BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "zext: basic 4",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_1, 0x300000001ULL),
+ BPF_MOV32_IMM(BPF_REG_1, 1),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
+ BPF_MOV32_IMM(BPF_REG_2, 2),
+ BPF_JMP_REG(BPF_JSLE, BPF_REG_1, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(BPF_REG_0, 4),
+ BPF_EXIT_INSN(),
+ },
+ .xlated_insns = {
+ BPF_LD_IMM64(BPF_REG_1, 0x300000001ULL),
+ BPF_MOV32_IMM(BPF_REG_1, 1),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
+ BPF_MOV32_IMM(BPF_REG_2, 2),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
+ BPF_JMP_REG(BPF_JSLE, BPF_REG_1, BPF_REG_2, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 3),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(BPF_REG_0, 4),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 4,
+},
+{
+ "zext: basic 5",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_1, 0x100000001ULL),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .xlated_insns = {
+ BPF_LD_IMM64(BPF_REG_1, 0x100000001ULL),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "zext: basic 6",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_MOV32_IMM(BPF_REG_1, 1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .xlated_insns = {
+ BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_MOV32_IMM(BPF_REG_1, 1),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "zext: ret from main",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, 0x100000001ULL),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .xlated_insns = {
+ BPF_LD_IMM64(BPF_REG_0, 0x100000001ULL),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 1,
+},
+{
+ "zext: ret from helper",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .xlated_insns = {
+ /* Helper call result is already a 64-bit def: shouldn't do zext. */
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_MOV32_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "zext: xadd",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, 0x100000001ULL),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .xlated_insns = {
+ BPF_LD_IMM64(BPF_REG_0, 0x100000001ULL),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 1,
+},
+{
+ "zext: ld_abs ind",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_LD_IMM64(BPF_REG_8, 0x100000000ULL),
+ BPF_MOV32_IMM(BPF_REG_8, 2),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_8, 32),
+ BPF_LD_IND(BPF_B, BPF_REG_8, 0),
+ BPF_EXIT_INSN(),
+ },
+ .xlated_insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_LD_IMM64(BPF_REG_8, 0x100000000ULL),
+ BPF_MOV32_IMM(BPF_REG_8, 2),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_8, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_8, 32),
+ },
+ .data = {
+ 10, 20, 30, 40, 50,
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 10,
+},
+{
+ "zext: multi paths, all 32-bit use",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_8, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 2),
+ BPF_MOV32_REG(BPF_REG_0, BPF_REG_8),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_REG(BPF_REG_7, BPF_REG_8),
+ BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ },
+ .xlated_insns = {
+ BPF_MOV32_IMM(BPF_REG_8, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 2),
+ BPF_MOV32_REG(BPF_REG_0, BPF_REG_8),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_REG(BPF_REG_7, BPF_REG_8),
+ BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ "zext: multi paths, partial 64-bit use",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_8, 0x100000001ULL),
+ BPF_MOV32_IMM(BPF_REG_8, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 2),
+ BPF_MOV32_REG(BPF_REG_0, BPF_REG_8),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .xlated_insns = {
+ BPF_LD_IMM64(BPF_REG_8, 0x100000001ULL),
+ BPF_MOV32_IMM(BPF_REG_8, 0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_8, 32),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 2),
+ BPF_MOV32_REG(BPF_REG_0, BPF_REG_8),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "zext: multi paths, 32-bit def override",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_8, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 3),
+ BPF_MOV32_IMM(BPF_REG_8, 2),
+ BPF_MOV32_REG(BPF_REG_0, BPF_REG_8),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(BPF_REG_8, 2),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
+ BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ },
+ .xlated_insns = {
+ BPF_MOV32_IMM(BPF_REG_8, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 3),
+ BPF_MOV32_IMM(BPF_REG_8, 2),
+ BPF_MOV32_REG(BPF_REG_0, BPF_REG_8),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(BPF_REG_8, 2),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_8, 32),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
+ BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ /* Diamond CFG
+ *
+ * -----
+ * | BB0 |
+ * -----
+ * /\
+ * / \
+ * / \
+ * ----- -----
+ * | BB1 | | BB2 | u32 def
+ * ----- -----
+ * \ /
+ * \ / -> pruned, but u64 read should propagate backward
+ * \ /
+ * -----
+ * | BB3 | u64 read
+ * -----
+ */
+ "zext: complex cfg 1",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 2),
+ /* BB1 */
+ BPF_MOV64_IMM(BPF_REG_8, 2),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* BB2 */
+ BPF_MOV32_IMM(BPF_REG_8, 2),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ /* BB3, 64-bit R8 read should be propagated backward. */
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .xlated_insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 2),
+ /* BB1 */
+ BPF_MOV64_IMM(BPF_REG_8, 2),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+ /* BB2 */
+ BPF_MOV32_IMM(BPF_REG_8, 2),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_8, 32),
+ /* BB3 */
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ /* Diamond CFG
+ *
+ * -----
+ * | BB0 | u32 def
+ * -----
+ * /\
+ * / \
+ * / \
+ * ----- -----
+ * | BB1 | | BB2 | u32 def
+ * ----- -----
+ * \ /
+ * \ / -> pruned, but u64 read should propagate backward
+ * \ /
+ * -----
+ * | BB3 | u64 read
+ * -----
+ */
+ "zext: complex cfg 2",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV32_IMM(BPF_REG_6, 2),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 2),
+ /* BB1 */
+ BPF_MOV64_IMM(BPF_REG_8, 2),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* BB2 */
+ BPF_MOV32_IMM(BPF_REG_8, 2),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ /* BB3, 64-bit R8 read should be propagated backward. */
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+ BPF_EXIT_INSN(),
+ },
+ .xlated_insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV32_IMM(BPF_REG_6, 2),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 32),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 2),
+ /* BB1 */
+ BPF_MOV64_IMM(BPF_REG_8, 2),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+ /* BB2 */
+ BPF_MOV32_IMM(BPF_REG_8, 2),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_8, 32),
+ /* BB3 */
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ /* Diamond CFG
+ *
+ * -----
+ * | BB0 | u32 def A
+ * -----
+ * /\
+ * / \
+ * / \
+ * ----- -----
+ * u64 def A | BB1 | | BB2 | u32 def B
+ * ----- -----
+ * \ /
+ * \ /
+ * \ /
+ * -----
+ * | BB3 | u64 read A and B
+ * -----
+ */
+ "zext: complex cfg 3",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV32_IMM(BPF_REG_6, 2),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 3),
+ /* BB1 */
+ BPF_MOV64_IMM(BPF_REG_8, 2),
+ BPF_MOV64_IMM(BPF_REG_6, 2),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+ /* BB2 */
+ BPF_MOV32_IMM(BPF_REG_8, 2),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ /* BB3, 64-bit R8 read should be propagated backward. */
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+ BPF_EXIT_INSN(),
+ },
+ .xlated_insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV32_IMM(BPF_REG_6, 2),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 32),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 3),
+ /* BB1 */
+ BPF_MOV64_IMM(BPF_REG_8, 2),
+ BPF_MOV64_IMM(BPF_REG_6, 2),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+ /* BB2 */
+ BPF_MOV32_IMM(BPF_REG_8, 2),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_8, 32),
+ /* BB3 */
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 2,
+},
+{
+ /* Diamond CFG
+ *
+ * -----
+ * | BB0 | u32 def A
+ * -----
+ * /\
+ * / \
+ * / \
+ * ----- -----
+ * u64 def A | BB1 | | BB2 | u64 def A and u32 def B
+ * ----- -----
+ * \ /
+ * \ /
+ * \ /
+ * -----
+ * | BB3 | u64 read A and B
+ * -----
+ */
+ "zext: complex cfg 4",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV32_IMM(BPF_REG_6, 2),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 3),
+ /* BB1 */
+ BPF_MOV64_IMM(BPF_REG_8, 2),
+ BPF_MOV64_IMM(BPF_REG_6, 3),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 3),
+ /* BB2 */
+ BPF_MOV32_IMM(BPF_REG_8, 2),
+ BPF_MOV64_IMM(BPF_REG_6, 3),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ /* BB3, 64-bit R8 read should be propagated backward. */
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+ BPF_EXIT_INSN(),
+ },
+ .xlated_insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV32_IMM(BPF_REG_6, 2),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 3),
+ /* BB1 */
+ BPF_MOV64_IMM(BPF_REG_8, 2),
+ BPF_MOV64_IMM(BPF_REG_6, 3),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 4),
+ /* BB2 */
+ BPF_MOV32_IMM(BPF_REG_8, 2),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_8, 32),
+ BPF_MOV64_IMM(BPF_REG_6, 3),
+ /* BB3 */
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 3,
+},
+{
+ "zext: callee-saved",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_6, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
+ BPF_EXIT_INSN(),
+ /* callee */
+ BPF_MOV32_IMM(BPF_REG_6, 1),
+ BPF_EXIT_INSN(),
+ },
+ .xlated_insns = {
+ /* caller u32 def should be zero extended. */
+ BPF_MOV32_IMM(BPF_REG_6, 0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 32),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ /* u64 use. */
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),
+ BPF_EXIT_INSN(),
+ /* callee u32 def shouldn't be affected. */
+ BPF_MOV32_IMM(BPF_REG_6, 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "zext: arg regs",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* callee */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .xlated_insns = {
+ /* caller u32 def should be zero extended. */
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ /* u64 use. */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* callee u64 use on caller-saved reg. */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 0,
+},
+{
+ "zext: return arg",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_MOV32_REG(BPF_REG_6, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+ BPF_EXIT_INSN(),
+ /* callee 1 */
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ /* callee 2 */
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .xlated_insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
+ BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_MOV32_REG(BPF_REG_6, BPF_REG_0),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 32),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+ BPF_EXIT_INSN(),
+ /* callee 1 */
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ /* callee 2 */
+ BPF_MOV32_IMM(BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 32),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 3,
+},
--
2.7.4