Date:   Fri, 23 Feb 2018 17:41:36 +0000
From:   Edward Cree <ecree@...arflare.com>
To:     netdev <netdev@...r.kernel.org>,
        Alexei Starovoitov <ast@...nel.org>,
        Daniel Borkmann <daniel@...earbox.net>
Subject: [RFC PATCH bpf-next 08/12] bpf/verifier: selftests for bounded loops

Mainly consists of tests that broke (or that I expected to break) earlier
 versions of the bounded-loop handling.
Also updates some existing tests to cope with changed error messages,
 programs now being accepted, and so on.

Signed-off-by: Edward Cree <ecree@...arflare.com>
---
 tools/testing/selftests/bpf/test_verifier.c | 198 +++++++++++++++++++++++++++-
 1 file changed, 191 insertions(+), 7 deletions(-)
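
As a reading aid (not part of the patch itself): the simplest accepted case
below, "bounded loop, count to 4", computes roughly the following in C terms.
The actual test is the eBPF instruction array in the diff; the function name
here is purely illustrative.

	/* Rough C sketch of the "count to 4" insn sequence. */
	static unsigned long long count_to_4(void)
	{
		unsigned long long r0 = 0;	/* BPF_MOV64_IMM(BPF_REG_0, 0) */

		do {
			r0 += 1;	/* BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1) */
		} while (r0 < 4);	/* BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2): the back-edge */

		return r0;		/* BPF_EXIT_INSN(); expected retval == 4 */
	}

The "count to 20" test is the same shape with the bound raised to 20, and is
expected to be rejected with "r0 is increasing too slowly".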

diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 722a16b2e9c4..fda35a5a0ff9 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -9338,7 +9338,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-		.errstr = "frames is too deep",
+		.errstr = "recursive call",
 		.result = REJECT,
 	},
 	{
@@ -9389,8 +9389,8 @@ static struct bpf_test tests[] = {
 			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
 		},
 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-		.errstr = "back-edge from insn",
-		.result = REJECT,
+		.result = ACCEPT,
+		.retval = 1,
 	},
 	{
 		"calls: conditional call 4",
@@ -9424,8 +9424,8 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-		.errstr = "back-edge from insn",
-		.result = REJECT,
+		.result = ACCEPT,
+		.retval = 1,
 	},
 	{
 		"calls: conditional call 6",
@@ -9666,7 +9666,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-		.errstr = "frames is too deep",
+		.errstr = "recursive call",
 		.result = REJECT,
 	},
 	{
@@ -9678,7 +9678,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-		.errstr = "frames is too deep",
+		.errstr = "recursive call",
 		.result = REJECT,
 	},
 	{
@@ -11135,6 +11135,190 @@ static struct bpf_test tests[] = {
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
 	},
+	{
+		"bounded loop, count to 4",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+			BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.retval = 4,
+	},
+	{
+		"bounded loop, count to 20",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+			BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 20, -2),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "r0 is increasing too slowly",
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"bounded loop, count from positive unknown to 4",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JSLT, BPF_REG_0, 0, 2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+			BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.retval = 4,
+	},
+	{
+		"bounded loop, count from totally unknown to 4",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+			BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.result = REJECT,
+		.errstr = "r0 is not increasing",
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"bounded loop, count to 4 with equality",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 4, -2),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "loop with this opcode not supported",
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"bounded loop, start in the middle",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_JMP_A(1),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+			BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.retval = 4,
+	},
+	{
+		"bounded loop containing a forward jump",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+			BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -3),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.retval = 4,
+	},
+	{
+		"bounded loop that jumps out rather than in",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+			BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, 1),
+			BPF_JMP_A(-3),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.result = REJECT,
+		.errstr = "loop on conditional fallthrough",
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"infinite loop after a conditional jump",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 5),
+			BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, 2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+			BPF_JMP_A(-2),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "back-edge from insn 3 to 2",
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"bounded recursion",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 4, 1),
+			BPF_EXIT_INSN(),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -5),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "recursive call",
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"infinite loop in two jumps",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_JMP_A(0),
+			BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "loop variable r0 is not increasing",
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"infinite loop: three-jump trick",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+			BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+			BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 2, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+			BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 2, -11),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "loop from insn 11 to 1 ceased to be bounded",
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
