Message-Id: <20251106125255.1969938-11-hao.sun@inf.ethz.ch>
Date: Thu,  6 Nov 2025 13:52:48 +0100
From: Hao Sun <sunhao.th@...il.com>
To: bpf@...r.kernel.org
Cc: ast@...nel.org,
	daniel@...earbox.net,
	andrii@...nel.org,
	eddyz87@...il.com,
	john.fastabend@...il.com,
	martin.lau@...ux.dev,
	song@...nel.org,
	yonghong.song@...ux.dev,
	linux-kernel@...r.kernel.org,
	sunhao.th@...il.com,
	Hao Sun <hao.sun@....ethz.ch>
Subject: [PATCH RFC 10/17] bpf: Add bcf_alu() 32-bit optimization

Lower symbolic ALU nodes to 32 bits when both operands and the resulting dst
register fit in 32 bits; narrower bitvector terms reduce solver/proof
complexity.
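
For reference, the fit_u32()/fit_s32() helpers this relies on (introduced
earlier in the series) come down to bounds checks on the tracked 64-bit
ranges; a minimal sketch, assuming this shape (the exact definitions may
differ):

  static bool fit_u32(const struct bpf_reg_state *reg)
  {
          /* whole unsigned 64-bit range fits in u32 */
          return reg->umax_value <= U32_MAX;
  }

  static bool fit_s32(const struct bpf_reg_state *reg)
  {
          /* whole signed 64-bit range fits in s32 */
          return reg->smin_value >= S32_MIN && reg->smax_value <= S32_MAX;
  }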

- Extend `bcf_alu()` with `op_u32`/`op_s32` hints and derive a `zext` decision:
  when the op is ALU32, or both operands and the result fit u32, emit a 32-bit
  op and zero-extend the result to 64 bits; when only the signed-32 fit holds,
  sign-extend to 64 bits after the op (see the condensed decision after this
  list).

- Compute `op_u32`/`op_s32` for pointer and scalar ALUs (via fit_u32()/fit_s32())
  before emitting the node, then AND them with the post-ALU dst range so the
  final node width reflects the verifier's bounds (the shared call-site
  pattern is shown below).
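
Condensed from the bcf_alu() hunk below, the width/extension decision is:

  bool zext = alu32 || op_u32;          /* decided before widening alu32 */

  alu32 |= (op_u32 || op_s32);          /* fetch operands at 32-bit width */
  /* ... build and add the 32-bit ALU node ... */
  if (zext)
          bcf_zext_32_to_64(env, dst_reg);  /* ALU32/unsigned fit: zero-extend */
  else if (op_s32)
          bcf_sext_32_to_64(env, dst_reg);  /* signed-only fit: sign-extend */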

This shrinks many bitvector (BV) nodes and helps keep each node's vlen within
its U8_MAX limit, reducing proof size.
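
The call sites share one pattern (taken from the hunks below): compute the
hints from both operands before the bounds update, then re-mask with the
post-ALU dst range:

  op_u32 = fit_u32(dst_reg) && fit_u32(src_reg);  /* pre-ALU: both operands */
  op_s32 = fit_s32(dst_reg) && fit_s32(src_reg);
  /* ... verifier updates dst_reg's bounds for this ALU op ... */
  op_u32 &= fit_u32(dst_reg);                     /* post-ALU: result must fit */
  op_s32 &= fit_s32(dst_reg);
  ret = bcf_alu(env, dst_reg, src_reg, opcode, alu32, op_u32, op_s32);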

Signed-off-by: Hao Sun <hao.sun@....ethz.ch>
---
 kernel/bpf/verifier.c | 30 +++++++++++++++++++++++++-----
 1 file changed, 25 insertions(+), 5 deletions(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 66682d365e5e..df6d16a1c6f6 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -14866,11 +14866,13 @@ static int sanitize_check_bounds(struct bpf_verifier_env *env,
 }
 
 static int bcf_alu(struct bpf_verifier_env *env, struct bpf_reg_state *dst_reg,
-		   struct bpf_reg_state *src_reg, u8 op, bool alu32)
+		   struct bpf_reg_state *src_reg, u8 op, bool alu32,
+		   bool op_u32, bool op_s32)
 {
 	DEFINE_RAW_FLEX(struct bcf_expr, alu_expr, args, 2);
 	bool unary = (op == BPF_NEG);
 	int dst, src = 0, bits;
+	bool zext = alu32 || op_u32;
 
 	if (!env->bcf.tracking)
 		return 0;
@@ -14879,6 +14881,7 @@ static int bcf_alu(struct bpf_verifier_env *env, struct bpf_reg_state *dst_reg,
 		return 0;
 	}
 
+	alu32 |= (op_u32 || op_s32);
 	dst = bcf_reg_expr(env, dst_reg, alu32);
 	if (!unary)
 		src = bcf_reg_expr(env, src_reg, alu32);
@@ -14892,8 +14895,11 @@ static int bcf_alu(struct bpf_verifier_env *env, struct bpf_reg_state *dst_reg,
 	alu_expr->args[0] = dst;
 	alu_expr->args[1] = src;
 	dst_reg->bcf_expr = bcf_add_expr(env, alu_expr);
-	if (alu32)
+	if (zext)
 		bcf_zext_32_to_64(env, dst_reg);
+	else if (op_s32)
+		bcf_sext_32_to_64(env, dst_reg);
+
 	if (dst_reg->bcf_expr < 0)
 		return dst_reg->bcf_expr;
 
@@ -14922,6 +14928,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 	u8 opcode = BPF_OP(insn->code);
 	u32 dst = insn->dst_reg;
 	int ret, bounds_ret;
+	bool op_u32, op_s32;
 
 	dst_reg = &regs[dst];
 	src_reg = dst_reg == ptr_reg ? off_reg : ptr_reg;
@@ -15034,6 +15041,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 			if (dst_reg->bcf_expr < 0)
 				return dst_reg->bcf_expr;
 		}
+		op_u32 = fit_u32(dst_reg) && fit_u32(src_reg);
+		op_s32 = fit_s32(dst_reg) && fit_s32(src_reg);
 		/* A new variable offset is created.  Note that off_reg->off
 		 * == 0, since it's a scalar.
 		 * dst_reg gets the pointer type and since some positive
@@ -15062,7 +15071,9 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 			memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
 		}
 
-		ret = bcf_alu(env, dst_reg, src_reg, opcode, false);
+		op_u32 &= fit_u32(dst_reg);
+		op_s32 &= fit_s32(dst_reg);
+		ret = bcf_alu(env, dst_reg, src_reg, opcode, false, op_u32, op_s32);
 		if (ret)
 			return ret;
 		break;
@@ -15102,6 +15113,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 			if (dst_reg->bcf_expr < 0)
 				return dst_reg->bcf_expr;
 		}
+		op_u32 = fit_u32(dst_reg) && fit_u32(src_reg);
+		op_s32 = fit_s32(dst_reg) && fit_s32(src_reg);
 		/* A new variable offset is created.  If the subtrahend is known
 		 * nonnegative, then any reg->range we had before is still good.
 		 */
@@ -15130,7 +15143,9 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 				memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
 		}
 
-		ret = bcf_alu(env, dst_reg, src_reg, opcode, false);
+		op_u32 &= fit_u32(dst_reg);
+		op_s32 &= fit_s32(dst_reg);
+		ret = bcf_alu(env, dst_reg, src_reg, opcode, false, op_u32, op_s32);
 		if (ret)
 			return ret;
 		break;
@@ -15787,6 +15802,7 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 	u8 opcode = BPF_OP(insn->code);
 	bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
 	int ret, dst_expr = dst_reg->bcf_expr;
+	bool op_u32, op_s32;
 
 	if (!is_safe_to_compute_dst_reg_range(insn, &src_reg)) {
 		__mark_reg_unknown(env, dst_reg);
@@ -15806,6 +15822,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 		if (dst_expr < 0)
 			return dst_expr;
 	}
+	op_u32 = fit_u32(dst_reg) && fit_u32(&src_reg);
+	op_s32 = fit_s32(dst_reg) && fit_s32(&src_reg);
 
 	/* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
 	 * There are two classes of instructions: The first class we track both
@@ -15887,7 +15905,9 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 	reg_bounds_sync(dst_reg);
 
 	dst_reg->bcf_expr = dst_expr;
-	ret = bcf_alu(env, dst_reg, &src_reg, opcode, alu32);
+	op_u32 &= fit_u32(dst_reg);
+	op_s32 &= fit_s32(dst_reg);
+	ret = bcf_alu(env, dst_reg, &src_reg, opcode, alu32, op_u32, op_s32);
 	if (ret)
 		return ret;
 
-- 
2.34.1

