Message-Id: <20211001130348.3670534-7-johan.almbladh@anyfinetworks.com>
Date:   Fri,  1 Oct 2021 15:03:44 +0200
From:   Johan Almbladh <johan.almbladh@...finetworks.com>
To:     ast@...nel.org, daniel@...earbox.net, andrii@...nel.org
Cc:     kafai@...com, songliubraving@...com, yhs@...com,
        john.fastabend@...il.com, kpsingh@...nel.org, iii@...ux.ibm.com,
        paul@...ium.io, yangtiezhu@...ngson.cn, netdev@...r.kernel.org,
        bpf@...r.kernel.org,
        Johan Almbladh <johan.almbladh@...finetworks.com>
Subject: [PATCH bpf-next 06/10] bpf/tests: Minor restructuring of ALU tests

This patch moves the ALU LSH/RSH/ARSH reference computations into the
common reference value function. It also fixes a typo in the BPF_CMPXCHG
test constants so that they have the intended value 0xfedcba9876543210.

Signed-off-by: Johan Almbladh <johan.almbladh@...finetworks.com>
---
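Note (not part of the commit message): below is a minimal user-space
sketch of the ARSH sign-extension trick now used by __bpf_alu_result().
The helper keeps everything in u64, checks whether bit 63 of v1 is set
(v1 > S64_MAX), and fills the vacated high bits by hand. The snippet is
hypothetical and not part of the patch; ref_arsh() and the test values
are illustrative only, and it assumes gcc/clang semantics where a right
shift of a negative s64 is an arithmetic shift.

#include <stdint.h>
#include <stdio.h>

static uint64_t ref_arsh(uint64_t v1, uint64_t v2)
{
	uint64_t res = v1 >> v2;

	/* Same trick as __bpf_alu_result(): if the shift count is
	 * non-zero and bit 63 of v1 is set, fill the vacated high
	 * bits with ones.
	 */
	if (v2 > 0 && v1 > INT64_MAX)
		res |= ~0ULL << (64 - v2);
	return res;
}

int main(void)
{
	/* Same pattern as the CMPXCHG tests; negative as s64 */
	uint64_t v = 0xfedcba9876543210ULL;
	unsigned int s;

	for (s = 0; s < 64; s++) {
		/* Plain signed shift gives the expected result */
		uint64_t want = (uint64_t)((int64_t)v >> s);

		if (ref_arsh(v, s) != want) {
			printf("mismatch at shift %u\n", s);
			return 1;
		}
	}
	printf("ok: unsigned ARSH emulation matches signed shift\n");
	return 0;
}

Only the LSH/RSH/ARSH cases are new in __bpf_alu_result(); the remaining
cases are carried over unchanged as the function moves up in the file.
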
 lib/test_bpf.c | 137 +++++++++++++++++++++++--------------------------
 1 file changed, 65 insertions(+), 72 deletions(-)

diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 201f34060eef..919323a3b69f 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -538,6 +538,57 @@ static int bpf_fill_max_jmp_never_taken(struct bpf_test *self)
 	return __bpf_fill_max_jmp(self, BPF_JLT, 0);
 }
 
+/* ALU result computation used in tests */
+static bool __bpf_alu_result(u64 *res, u64 v1, u64 v2, u8 op)
+{
+	*res = 0;
+	switch (op) {
+	case BPF_MOV:
+		*res = v2;
+		break;
+	case BPF_AND:
+		*res = v1 & v2;
+		break;
+	case BPF_OR:
+		*res = v1 | v2;
+		break;
+	case BPF_XOR:
+		*res = v1 ^ v2;
+		break;
+	case BPF_LSH:
+		*res = v1 << v2;
+		break;
+	case BPF_RSH:
+		*res = v1 >> v2;
+		break;
+	case BPF_ARSH:
+		*res = v1 >> v2;
+		if (v2 > 0 && v1 > S64_MAX)
+			*res |= ~0ULL << (64 - v2);
+		break;
+	case BPF_ADD:
+		*res = v1 + v2;
+		break;
+	case BPF_SUB:
+		*res = v1 - v2;
+		break;
+	case BPF_MUL:
+		*res = v1 * v2;
+		break;
+	case BPF_DIV:
+		if (v2 == 0)
+			return false;
+		*res = div64_u64(v1, v2);
+		break;
+	case BPF_MOD:
+		if (v2 == 0)
+			return false;
+		div64_u64_rem(v1, v2, res);
+		break;
+	}
+	return true;
+}
+
 /* Test an ALU shift operation for all valid shift values */
 static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
 				u8 mode, bool alu32)
@@ -576,37 +627,19 @@ static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
 					insn[i++] = BPF_ALU32_IMM(op, R1, imm);
 				else
 					insn[i++] = BPF_ALU32_REG(op, R1, R2);
-				switch (op) {
-				case BPF_LSH:
-					val = (u32)reg << imm;
-					break;
-				case BPF_RSH:
-					val = (u32)reg >> imm;
-					break;
-				case BPF_ARSH:
-					val = (u32)reg >> imm;
-					if (imm > 0 && (reg & 0x80000000))
-						val |= ~(u32)0 << (32 - imm);
-					break;
-				}
+
+				if (op == BPF_ARSH)
+					reg = (s32)reg;
+				else
+					reg = (u32)reg;
+				__bpf_alu_result(&val, reg, imm, op);
+				val = (u32)val;
 			} else {
 				if (mode == BPF_K)
 					insn[i++] = BPF_ALU64_IMM(op, R1, imm);
 				else
 					insn[i++] = BPF_ALU64_REG(op, R1, R2);
-				switch (op) {
-				case BPF_LSH:
-					val = (u64)reg << imm;
-					break;
-				case BPF_RSH:
-					val = (u64)reg >> imm;
-					break;
-				case BPF_ARSH:
-					val = (u64)reg >> imm;
-					if (imm > 0 && reg < 0)
-						val |= ~(u64)0 << (64 - imm);
-					break;
-				}
+				__bpf_alu_result(&val, reg, imm, op);
 			}
 
 			/*
@@ -799,46 +832,6 @@ static int __bpf_fill_pattern(struct bpf_test *self, void *arg,
  * test is designed to verify e.g. the ALU and ALU64 operations for JITs that
  * emit different code depending on the magnitude of the immediate value.
  */
-
-static bool __bpf_alu_result(u64 *res, u64 v1, u64 v2, u8 op)
-{
-	*res = 0;
-	switch (op) {
-	case BPF_MOV:
-		*res = v2;
-		break;
-	case BPF_AND:
-		*res = v1 & v2;
-		break;
-	case BPF_OR:
-		*res = v1 | v2;
-		break;
-	case BPF_XOR:
-		*res = v1 ^ v2;
-		break;
-	case BPF_ADD:
-		*res = v1 + v2;
-		break;
-	case BPF_SUB:
-		*res = v1 - v2;
-		break;
-	case BPF_MUL:
-		*res = v1 * v2;
-		break;
-	case BPF_DIV:
-		if (v2 == 0)
-			return false;
-		*res = div64_u64(v1, v2);
-		break;
-	case BPF_MOD:
-		if (v2 == 0)
-			return false;
-		div64_u64_rem(v1, v2, res);
-		break;
-	}
-	return true;
-}
-
 static int __bpf_emit_alu64_imm(struct bpf_test *self, void *arg,
 				struct bpf_insn *insns, s64 dst, s64 imm)
 {
@@ -7881,7 +7874,7 @@ static struct bpf_test tests[] = {
 		"BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test successful return",
 		.u.insns_int = {
 			BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
-			BPF_LD_IMM64(R2, 0xfecdba9876543210ULL),
+			BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
 			BPF_ALU64_REG(BPF_MOV, R0, R1),
 			BPF_STX_MEM(BPF_DW, R10, R1, -40),
 			BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
@@ -7898,7 +7891,7 @@ static struct bpf_test tests[] = {
 		"BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test successful store",
 		.u.insns_int = {
 			BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
-			BPF_LD_IMM64(R2, 0xfecdba9876543210ULL),
+			BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
 			BPF_ALU64_REG(BPF_MOV, R0, R1),
 			BPF_STX_MEM(BPF_DW, R10, R0, -40),
 			BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
@@ -7916,7 +7909,7 @@ static struct bpf_test tests[] = {
 		"BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test failure return",
 		.u.insns_int = {
 			BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
-			BPF_LD_IMM64(R2, 0xfecdba9876543210ULL),
+			BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
 			BPF_ALU64_REG(BPF_MOV, R0, R1),
 			BPF_ALU64_IMM(BPF_ADD, R0, 1),
 			BPF_STX_MEM(BPF_DW, R10, R1, -40),
@@ -7934,7 +7927,7 @@ static struct bpf_test tests[] = {
 		"BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test failure store",
 		.u.insns_int = {
 			BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
-			BPF_LD_IMM64(R2, 0xfecdba9876543210ULL),
+			BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
 			BPF_ALU64_REG(BPF_MOV, R0, R1),
 			BPF_ALU64_IMM(BPF_ADD, R0, 1),
 			BPF_STX_MEM(BPF_DW, R10, R1, -40),
@@ -7953,11 +7946,11 @@ static struct bpf_test tests[] = {
 		"BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test side effects",
 		.u.insns_int = {
 			BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
-			BPF_LD_IMM64(R2, 0xfecdba9876543210ULL),
+			BPF_LD_IMM64(R2, 0xfedcba9876543210ULL),
 			BPF_ALU64_REG(BPF_MOV, R0, R1),
 			BPF_STX_MEM(BPF_DW, R10, R1, -40),
 			BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
-			BPF_LD_IMM64(R0, 0xfecdba9876543210ULL),
+			BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
 			BPF_JMP_REG(BPF_JNE, R0, R2, 1),
 			BPF_ALU64_REG(BPF_SUB, R0, R2),
 			BPF_EXIT_INSN(),
-- 
2.30.2
