Date:   Mon, 26 Feb 2018 11:41:47 +1100
From:   Stephen Rothwell <sfr@...b.auug.org.au>
To:     Daniel Borkmann <daniel@...earbox.net>,
        Alexei Starovoitov <ast@...nel.org>,
        Networking <netdev@...r.kernel.org>
Cc:     Linux-Next Mailing List <linux-next@...r.kernel.org>,
        Linux Kernel Mailing List <linux-kernel@...r.kernel.org>
Subject: linux-next: manual merge of the bpf-next tree with the bpf tree

Hi all,

Today's linux-next merge of the bpf-next tree got a conflict in:

  tools/testing/selftests/bpf/test_verifier.c

between commit:

  ca36960211eb ("bpf: allow xadd only on aligned memory")

from the bpf tree and commit:

  23d191a82c13 ("bpf: add various jit test cases")

from the bpf-next tree.
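
For background (my summary, not from either commit message): BPF_XADD is
eBPF's atomic add, the instruction clang's BPF backend emits for
__sync_fetch_and_add(), and commit ca36960211eb makes the verifier reject
it on memory that is not aligned to the operand size. A minimal
user-space sketch of the same alignment rule, assuming nothing beyond the
GCC/clang __sync builtin:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t slot = 0;                  /* an 8-byte-aligned stack slot */
	uint32_t *w = (uint32_t *)&slot;    /* offset 0: 4-byte aligned, OK */

	/* This mirrors the accepted case.  The rejected "xadd/w check
	 * unaligned stack" test instead adds at offset -7 into an 8-byte
	 * slot -- not 4-byte aligned -- which is undefined behaviour here
	 * too and may trap on some architectures; that is exactly what
	 * the verifier now refuses to hand to the JIT. */
	__sync_fetch_and_add(w, 1);

	printf("*w = %u\n", (unsigned)*w);  /* prints 1 */
	return 0;
}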

I fixed it up (see below) and can carry the fix as necessary. This
is now fixed as far as linux-next is concerned, but any non-trivial
conflicts should be mentioned to your upstream maintainer when your tree
is submitted for merging.  You may also want to consider cooperating
with the maintainer of the conflicting tree to minimise any particularly
complex conflicts.

-- 
Cheers,
Stephen Rothwell

diff --cc tools/testing/selftests/bpf/test_verifier.c
index 437c0b1c9d21,c987d3a2426f..000000000000
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@@ -11163,64 -11140,95 +11166,153 @@@ static struct bpf_test tests[] = 
  		.result = REJECT,
  		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
  	},
 +	{
 +		"xadd/w check unaligned stack",
 +		.insns = {
 +			BPF_MOV64_IMM(BPF_REG_0, 1),
 +			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
 +			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
 +			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
 +			BPF_EXIT_INSN(),
 +		},
 +		.result = REJECT,
 +		.errstr = "misaligned stack access off",
 +		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 +	},
 +	{
 +		"xadd/w check unaligned map",
 +		.insns = {
 +			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
 +			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 +			BPF_LD_MAP_FD(BPF_REG_1, 0),
 +			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 +				     BPF_FUNC_map_lookup_elem),
 +			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
 +			BPF_EXIT_INSN(),
 +			BPF_MOV64_IMM(BPF_REG_1, 1),
 +			BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
 +			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
 +			BPF_EXIT_INSN(),
 +		},
 +		.fixup_map1 = { 3 },
 +		.result = REJECT,
 +		.errstr = "misaligned value access off",
 +		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 +	},
 +	{
 +		"xadd/w check unaligned pkt",
 +		.insns = {
 +			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
 +				    offsetof(struct xdp_md, data)),
 +			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
 +				    offsetof(struct xdp_md, data_end)),
 +			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
 +			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
 +			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
 +			BPF_MOV64_IMM(BPF_REG_0, 99),
 +			BPF_JMP_IMM(BPF_JA, 0, 0, 6),
 +			BPF_MOV64_IMM(BPF_REG_0, 1),
 +			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
 +			BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
 +			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
 +			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
 +			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
 +			BPF_EXIT_INSN(),
 +		},
 +		.result = REJECT,
 +		.errstr = "BPF_XADD stores into R2 packet",
 +		.prog_type = BPF_PROG_TYPE_XDP,
 +	},
+ 	{
+ 		"jit: lsh, rsh, arsh by 1",
+ 		.insns = {
+ 			BPF_MOV64_IMM(BPF_REG_0, 1),
+ 			BPF_MOV64_IMM(BPF_REG_1, 0xff),
+ 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
+ 			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
+ 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
+ 			BPF_EXIT_INSN(),
+ 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
+ 			BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
+ 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
+ 			BPF_EXIT_INSN(),
+ 			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
+ 			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
+ 			BPF_EXIT_INSN(),
+ 			BPF_MOV64_IMM(BPF_REG_0, 2),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.result = ACCEPT,
+ 		.retval = 2,
+ 	},
+ 	{
+ 		"jit: mov32 for ldimm64, 1",
+ 		.insns = {
+ 			BPF_MOV64_IMM(BPF_REG_0, 2),
+ 			BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
+ 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
+ 			BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
+ 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+ 			BPF_MOV64_IMM(BPF_REG_0, 1),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.result = ACCEPT,
+ 		.retval = 2,
+ 	},
+ 	{
+ 		"jit: mov32 for ldimm64, 2",
+ 		.insns = {
+ 			BPF_MOV64_IMM(BPF_REG_0, 1),
+ 			BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
+ 			BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
+ 			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+ 			BPF_MOV64_IMM(BPF_REG_0, 2),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.result = ACCEPT,
+ 		.retval = 2,
+ 	},
+ 	{
+ 		"jit: various mul tests",
+ 		.insns = {
+ 			BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
+ 			BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
+ 			BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
+ 			BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
+ 			BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
+ 			BPF_MOV64_IMM(BPF_REG_0, 1),
+ 			BPF_EXIT_INSN(),
+ 			BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
+ 			BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
+ 			BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
+ 			BPF_MOV64_IMM(BPF_REG_0, 1),
+ 			BPF_EXIT_INSN(),
+ 			BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
+ 			BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
+ 			BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
+ 			BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
+ 			BPF_MOV64_IMM(BPF_REG_0, 1),
+ 			BPF_EXIT_INSN(),
+ 			BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
+ 			BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
+ 			BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
+ 			BPF_MOV64_IMM(BPF_REG_0, 1),
+ 			BPF_EXIT_INSN(),
+ 			BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
+ 			BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
+ 			BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
+ 			BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
+ 			BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
+ 			BPF_MOV64_IMM(BPF_REG_0, 1),
+ 			BPF_EXIT_INSN(),
+ 			BPF_MOV64_IMM(BPF_REG_0, 2),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.result = ACCEPT,
+ 		.retval = 2,
+ 	},
+ 
  };
  
  static int probe_filter_length(const struct bpf_insn *fp)
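
A gloss on what the new "jit:" cases pin down (my reading, not from the
commit message): eBPF 32-bit ALU/MOV operations write the low 32 bits of
the destination register and zero-extend into the upper 32 bits, so a JIT
may emit a single 32-bit move for a ldimm64 whose upper word is zero, but
only then. The two "mov32 for ldimm64" tests express that in BPF; the
same arithmetic in plain C:

#include <stdint.h>
#include <assert.h>

int main(void)
{
	/* "jit: mov32 for ldimm64, 1": shifting right by 32 must expose
	 * exactly the upper word of the 64-bit immediate. */
	uint64_t r1 = 0xfeffffffffffffffULL;
	r1 >>= 32;
	assert(r1 == 0xfeffffffULL);

	/* "jit: mov32 for ldimm64, 2": a JIT that used a 32-bit move for
	 * an immediate with a non-zero upper word would truncate
	 * 0x1ffffffff to 0xffffffff; as full 64-bit values the two differ. */
	uint64_t a = 0x1ffffffffULL, b = 0xffffffffULL;
	assert(a != b);

	/* A 32-bit move (BPF_MOV32) keeps the low word and zero-extends. */
	assert((uint64_t)(uint32_t)a == b);
	return 0;
}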

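The "various mul tests" likewise fix the multiply semantics: BPF_ALU32
BPF_MUL truncates both operands to their low 32 bits, multiplies modulo
2^32, and zero-extends the result, which is exactly what uint32_t
arithmetic does in C. A sketch using the constants from the test above:

#include <stdint.h>
#include <assert.h>

int main(void)
{
	/* 64-bit multiply: the full product. */
	uint64_t full = 0xfefefeULL * 0xefefefULL;
	assert(full == 0xeeff0d413122ULL);

	/* 32-bit multiply: product taken modulo 2^32, then zero-extended,
	 * so it must equal the low word of the 64-bit product. */
	uint32_t lo = (uint32_t)0xfefefeULL * (uint32_t)0xefefefULL;
	assert((uint64_t)lo == (full & 0xffffffffULL));

	/* Operand bits above 32 are ignored, as the final case checks
	 * against its expected value 0x952a7bbc. */
	uint32_t t = (uint32_t)0xeeff0d413122ULL * (uint32_t)0xfefefeULL;
	assert(t == 0x952a7bbcUL);
	return 0;
}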
