Date: Thu, 30 May 2019 21:30:33 +0100
From: Jiong Wang <jiong.wang@...ronome.com>
To: Luke Nelson <luke.r.nels@...il.com>
Cc: Xi Wang <xi.wang@...il.com>, Björn Töpel <bjorn.topel@...il.com>,
	Palmer Dabbelt <palmer@...ive.com>, Albert Ou <aou@...s.berkeley.edu>,
	Alexei Starovoitov <ast@...nel.org>, Daniel Borkmann <daniel@...earbox.net>,
	Martin KaFai Lau <kafai@...com>, Song Liu <songliubraving@...com>,
	Yonghong Song <yhs@...com>, netdev@...r.kernel.org,
	linux-riscv@...ts.infradead.org, bpf@...r.kernel.org,
	linux-kernel@...r.kernel.org
Subject: Re: [PATCH 2/2] bpf: test_bpf: add tests for upper bits of 32-bit operations

Luke Nelson writes:

> This commit introduces tests that validate the upper 32 bits
> of the result of 32-bit BPF ALU operations.
>
> The existing tests for 32-bit operations do not check the upper 32
> bits of results because the exit instruction truncates the result.
> These tests perform a 32-bit ALU operation followed by a right shift.
> These tests can catch subtle bugs in the extension behavior of JITed
> instructions, including several bugs in the RISC-V BPF JIT, fixed in
> another patch.

Hi Luke,

Have you seen the following?

  https://www.spinics.net/lists/netdev/msg573355.html

It has been merged into the bpf tree and should give full test coverage of
all BPF instructions that can write to a sub-register and are exposed to
the JIT back-ends. And AFAIK, new unit tests now go into test_verifier,
which is a userspace test infrastructure that offers more test
functionality; tests there also run through the verifier.

Regards,
Jiong

> The added tests pass the JIT and interpreter on x86, as well as the
> JIT and interpreter of RISC-V once the zero extension bugs were fixed.
>
> Cc: Xi Wang <xi.wang@...il.com>
> Signed-off-by: Luke Nelson <luke.r.nels@...il.com>
> ---
>  lib/test_bpf.c | 164 +++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 164 insertions(+)
>
> diff --git a/lib/test_bpf.c b/lib/test_bpf.c
> index 0845f635f404..4580dc0220f1 100644
> --- a/lib/test_bpf.c
> +++ b/lib/test_bpf.c
> @@ -2461,6 +2461,20 @@ static struct bpf_test tests[] = {
> 		{ },
> 		{ { 0, 1 } },
> 	},
> +	{
> +		"ALU_ADD_X: (1 + 4294967294) >> 32 + 4294967294 = 4294967294",
> +		.u.insns_int = {
> +			BPF_LD_IMM64(R0, 1U),
> +			BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
> +			BPF_ALU32_REG(BPF_ADD, R0, R1),
> +			BPF_ALU64_IMM(BPF_RSH, R0, 32),
> +			BPF_ALU32_REG(BPF_ADD, R0, R1),
> +			BPF_EXIT_INSN(),
> +		},
> +		INTERNAL,
> +		{ },
> +		{ { 0, 4294967294U } },
> +	},
> 	{
> 		"ALU64_ADD_X: 1 + 2 = 3",
> 		.u.insns_int = {
> @@ -2812,6 +2826,20 @@ static struct bpf_test tests[] = {
> 		{ },
> 		{ { 0, 1 } },
> 	},
> +	{
> +		"ALU_SUB_X: (4294967295 - 1) >> 32 + 1 = 1",
> +		.u.insns_int = {
> +			BPF_LD_IMM64(R0, 4294967295U),
> +			BPF_ALU32_IMM(BPF_MOV, R1, 1U),
> +			BPF_ALU32_REG(BPF_SUB, R0, R1),
> +			BPF_ALU64_IMM(BPF_RSH, R0, 32),
> +			BPF_ALU32_REG(BPF_ADD, R0, R1),
> +			BPF_EXIT_INSN(),
> +		},
> +		INTERNAL,
> +		{ },
> +		{ { 0, 1 } },
> +	},
> 	{
> 		"ALU64_SUB_X: 3 - 1 = 2",
> 		.u.insns_int = {
> @@ -3391,6 +3419,20 @@ static struct bpf_test tests[] = {
> 		{ },
> 		{ { 0, 0xffffffff } },
> 	},
> +	{
> +		"ALU_AND_X: (-1 & -1) >> 32 + 1 = 1",
> +		.u.insns_int = {
> +			BPF_LD_IMM64(R0, -1UL),
> +			BPF_LD_IMM64(R1, -1UL),
> +			BPF_ALU32_REG(BPF_AND, R0, R1),
> +			BPF_ALU64_IMM(BPF_RSH, R0, 32),
> +			BPF_ALU32_IMM(BPF_ADD, R0, 1U),
> +			BPF_EXIT_INSN(),
> +		},
> +		INTERNAL,
> +		{ },
> +		{ { 0, 1 } },
> +	},
> 	{
> 		"ALU64_AND_X: 3 & 2 = 2",
> 		.u.insns_int = {
> @@ -3533,6 +3575,20 @@ static struct bpf_test tests[] = {
> 		{ },
> 		{ { 0, 0xffffffff } },
> 	},
> +	{
> +		"ALU_OR_X: (0 | -1) >> 32 + 1 = 1",
> +		.u.insns_int = {
> +			BPF_LD_IMM64(R0, 0),
> +			BPF_LD_IMM64(R1, -1UL),
> +			BPF_ALU32_REG(BPF_OR, R0, R1),
> +			BPF_ALU64_IMM(BPF_RSH, R0, 32),
> +			BPF_ALU32_IMM(BPF_ADD, R0, 1U),
> +			BPF_EXIT_INSN(),
> +		},
> +		INTERNAL,
> +		{ },
> +		{ { 0, 1 } },
> +	},
> 	{
> 		"ALU64_OR_X: 1 | 2 = 3",
> 		.u.insns_int = {
> @@ -3675,6 +3731,20 @@ static struct bpf_test tests[] = {
> 		{ },
> 		{ { 0, 0xfffffffe } },
> 	},
> +	{
> +		"ALU_XOR_X: (0 ^ -1) >> 32 + 1 = 1",
> +		.u.insns_int = {
> +			BPF_LD_IMM64(R0, 0),
> +			BPF_LD_IMM64(R1, -1UL),
> +			BPF_ALU32_REG(BPF_XOR, R0, R1),
> +			BPF_ALU64_IMM(BPF_RSH, R0, 32),
> +			BPF_ALU32_IMM(BPF_ADD, R0, 1U),
> +			BPF_EXIT_INSN(),
> +		},
> +		INTERNAL,
> +		{ },
> +		{ { 0, 1 } },
> +	},
> 	{
> 		"ALU64_XOR_X: 5 ^ 6 = 3",
> 		.u.insns_int = {
> @@ -3817,6 +3887,20 @@ static struct bpf_test tests[] = {
> 		{ },
> 		{ { 0, 0x80000000 } },
> 	},
> +	{
> +		"ALU_LSH_X: (1 << 31) >> 32 + 1 = 1",
> +		.u.insns_int = {
> +			BPF_LD_IMM64(R0, 1),
> +			BPF_ALU32_IMM(BPF_MOV, R1, 31),
> +			BPF_ALU32_REG(BPF_LSH, R0, R1),
> +			BPF_ALU64_IMM(BPF_RSH, R0, 32),
> +			BPF_ALU32_IMM(BPF_ADD, R0, 1),
> +			BPF_EXIT_INSN(),
> +		},
> +		INTERNAL,
> +		{ },
> +		{ { 0, 1 } },
> +	},
> 	{
> 		"ALU64_LSH_X: 1 << 1 = 2",
> 		.u.insns_int = {
> @@ -3842,6 +3926,19 @@ static struct bpf_test tests[] = {
> 		{ { 0, 0x80000000 } },
> 	},
> 	/* BPF_ALU | BPF_LSH | BPF_K */
> +	{
> +		"ALU_LSH_K: (1 << 31) >> 32 + 1 = 1",
> +		.u.insns_int = {
> +			BPF_LD_IMM64(R0, 1),
> +			BPF_ALU32_IMM(BPF_LSH, R0, 31),
> +			BPF_ALU64_IMM(BPF_RSH, R0, 32),
> +			BPF_ALU32_IMM(BPF_ADD, R0, 1),
> +			BPF_EXIT_INSN(),
> +		},
> +		INTERNAL,
> +		{ },
> +		{ { 0, 1 } },
> +	},
> 	{
> 		"ALU_LSH_K: 1 << 1 = 2",
> 		.u.insns_int = {
> @@ -3911,6 +4008,20 @@ static struct bpf_test tests[] = {
> 		{ },
> 		{ { 0, 1 } },
> 	},
> +	{
> +		"ALU_RSH_X: (0x80000000 >> 0) >> 32 + 1 = 1",
> +		.u.insns_int = {
> +			BPF_LD_IMM64(R0, 0x80000000),
> +			BPF_ALU32_IMM(BPF_MOV, R1, 0),
> +			BPF_ALU32_REG(BPF_RSH, R0, R1),
> +			BPF_ALU64_IMM(BPF_RSH, R0, 32),
> +			BPF_ALU32_IMM(BPF_ADD, R0, 1),
> +			BPF_EXIT_INSN(),
> +		},
> +		INTERNAL,
> +		{ },
> +		{ { 0, 1 } },
> +	},
> 	{
> 		"ALU64_RSH_X: 2 >> 1 = 1",
> 		.u.insns_int = {
> @@ -3936,6 +4047,19 @@ static struct bpf_test tests[] = {
> 		{ { 0, 1 } },
> 	},
> 	/* BPF_ALU | BPF_RSH | BPF_K */
> +	{
> +		"ALU_RSH_K: (0x80000000 >> 0) >> 32 + 1 = 1",
> +		.u.insns_int = {
> +			BPF_LD_IMM64(R0, 0x80000000),
> +			BPF_ALU32_IMM(BPF_RSH, R0, 0),
> +			BPF_ALU64_IMM(BPF_RSH, R0, 32),
> +			BPF_ALU32_IMM(BPF_ADD, R0, 1),
> +			BPF_EXIT_INSN(),
> +		},
> +		INTERNAL,
> +		{ },
> +		{ { 0, 1 } },
> +	},
> 	{
> 		"ALU_RSH_K: 2 >> 1 = 1",
> 		.u.insns_int = {
> @@ -3993,7 +4117,34 @@ static struct bpf_test tests[] = {
> 		{ },
> 		{ { 0, 0xffff00ff } },
> 	},
> +	{
> +		"ALU_ARSH_X: (0x80000000 >> 0) >> 32 + 1 = 1",
> +		.u.insns_int = {
> +			BPF_LD_IMM64(R0, 0x80000000),
> +			BPF_ALU32_IMM(BPF_MOV, R1, 0),
> +			BPF_ALU32_REG(BPF_ARSH, R0, R1),
> +			BPF_ALU64_IMM(BPF_RSH, R0, 32),
> +			BPF_ALU32_IMM(BPF_ADD, R0, 1),
> +			BPF_EXIT_INSN(),
> +		},
> +		INTERNAL,
> +		{ },
> +		{ { 0, 1 } },
> +	},
> 	/* BPF_ALU | BPF_ARSH | BPF_K */
> +	{
> +		"ALU_ARSH_K: (0x80000000 >> 0) >> 32 + 1 = 1",
> +		.u.insns_int = {
> +			BPF_LD_IMM64(R0, 0x80000000),
> +			BPF_ALU32_IMM(BPF_ARSH, R0, 0),
> +			BPF_ALU64_IMM(BPF_RSH, R0, 32),
> +			BPF_ALU32_IMM(BPF_ADD, R0, 1),
> +			BPF_EXIT_INSN(),
> +		},
> +		INTERNAL,
> +		{ },
> +		{ { 0, 1 } },
> +	},
> 	{
> 		"ALU_ARSH_K: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff",
> 		.u.insns_int = {
> @@ -4028,6 +4179,19 @@ static struct bpf_test tests[] = {
> 		{ },
> 		{ { 0, 3 } },
> 	},
> +	{
> +		"ALU_NEG: -(1) >> 32 + 1 = 1",
> +		.u.insns_int = {
> +			BPF_ALU32_IMM(BPF_MOV, R0, 1),
> +			BPF_ALU32_IMM(BPF_NEG, R0, 0),
> +			BPF_ALU64_IMM(BPF_RSH, R0, 32),
> +			BPF_ALU64_IMM(BPF_ADD, R0, 1),
> +			BPF_EXIT_INSN(),
> +		},
> +		INTERNAL,
> +		{ },
> +		{ { 0, 1 } },
> +	},
> 	{
> 		"ALU64_NEG: -(3) = -3",
> 		.u.insns_int = {
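
A note on how these tests catch extension bugs, and on the test_verifier
route Jiong points to: each added test performs a 32-bit ALU operation and
then shifts the full 64-bit register right by 32, so any garbage left in
the upper word by a JIT that fails to zero-extend changes the final return
value. Taking ALU_ADD_X as the example, 1 + 4294967294 is 0xffffffff in
the low 32 bits; a correct JIT zero-extends, the shift yields 0, and the
final 32-bit add returns 4294967294, whereas a JIT that sign-extends the
32-bit result leaves 0xffffffff in the upper word and the test returns
4294967293 instead.

Purely for illustration, the sketch below shows how one such check might
be written in the test_verifier format Jiong mentions
(tools/testing/selftests/bpf/verifier/). It assumes the usual struct
bpf_test fields there (.insns, .result, .retval); the test name is made up
and this snippet is not part of the posted patch or of Jiong's merged
series.

	{
		"alu32 add: upper 32 bits must be zero after 32-bit op",
		.insns = {
		/* R0 = 1, R1 = 0xfffffffe (32-bit mov zero-extends) */
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_ALU32_IMM(BPF_MOV, BPF_REG_1, -2),
		/* 32-bit add: low word becomes 0xffffffff, upper word must be 0 */
		BPF_ALU32_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
		/* expose the upper word as the program's return value */
		BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
		BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 0,
	},

A buggy sign-extending JIT would make this program return 0xffffffff
rather than 0, so the retval check fails in the same way the test_bpf
cases above do.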